// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"
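
/*
 * Max number of times we'll walk the boosting chain:
 */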
int max_lock_depth = 1024;
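
/*
 * Debug aware fast / slowpath lock, trylock and unlock.
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */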
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  struct lockdep_map *nest_lock,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
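/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */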
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */
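
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */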
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
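
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex, interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */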
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
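
/**
 * rt_mutex_lock_killable - lock a rt_mutex, killable
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a fatal signal
 */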
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
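
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */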
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
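
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */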
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
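
/*
 * Typical usage of the sleeping-lock API above, as a sketch only
 * ('my_dev' and its 'lock' member are hypothetical, not part of this
 * file):
 *
 *	if (rt_mutex_lock_interruptible(&my_dev->lock))
 *		return -EINTR;
 *	... critical section ...
 *	rt_mutex_unlock(&my_dev->lock);
 */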
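
/*
 * Futex variants, must not use fastpath.
 */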
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}
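
/**
 * __rt_mutex_futex_unlock - Futex variant, that since futex variants
 * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 */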
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}
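
	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */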
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}
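
/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */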
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
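
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */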
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
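	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */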
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}
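
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and is no longer visible to other tasks.
 */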
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}
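
/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */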
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;
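
	/* We enforce deadlock detection for futexes */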
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
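		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */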
		ret = 0;
	}

	return ret;
}
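
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock() this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */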
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
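
/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, NULL if none. The hrtimer should already have
 *		been started.
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support.
 */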
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
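	/* sleep on the mutex */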
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
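
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */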
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
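
/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find that we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we're done
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value
 *
 * Special API call for PI-futex support.
 */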
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
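	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */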
	try_to_take_rt_mutex(lock, current, waiter);
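
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, and if not, take us off the wait_list.
	 */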
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
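
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */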
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
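
/*
 * How the three proxy-lock calls above compose, as a simplified sketch
 * of the PI-futex style usage (error handling elided; @waiter is assumed
 * to be pre-initialized, e.g. with rt_mutex_init_waiter()):
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	if (ret == 1) {
 *		... lock acquired on behalf of @task, wake it up ...
 *	} else if (ret == 0) {
 *		... @task blocked; later, from @task's context: ...
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	... got the lock after all ...
 *	}
 */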
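
/*
 * Recheck the pi chain, in case we got a priority setting.
 *
 * Called from sched_setscheduler().
 */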
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
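
	/* gets dropped in rt_mutex_adjust_prio_chain()! */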
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
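
/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */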
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
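/* Mutexes */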
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);

#endif /* CONFIG_PREEMPT_RT */