1
2
3#ifndef WW_RT
4
5#define MUTEX mutex
6#define MUTEX_WAITER mutex_waiter
7
8static inline struct mutex_waiter *
9__ww_waiter_first(struct mutex *lock)
10{
11 struct mutex_waiter *w;
12
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
14 if (list_entry_is_head(w, &lock->wait_list, list))
15 return NULL;
16
17 return w;
18}
19
20static inline struct mutex_waiter *
21__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
22{
23 w = list_next_entry(w, list);
24 if (list_entry_is_head(w, &lock->wait_list, list))
25 return NULL;
26
27 return w;
28}
29
30static inline struct mutex_waiter *
31__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
32{
33 w = list_prev_entry(w, list);
34 if (list_entry_is_head(w, &lock->wait_list, list))
35 return NULL;
36
37 return w;
38}
39
40static inline struct mutex_waiter *
41__ww_waiter_last(struct mutex *lock)
42{
43 struct mutex_waiter *w;
44
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
46 if (list_entry_is_head(w, &lock->wait_list, list))
47 return NULL;
48
49 return w;
50}
51
52static inline void
53__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
54{
55 struct list_head *p = &lock->wait_list;
56 if (pos)
57 p = &pos->list;
58 __mutex_add_waiter(lock, waiter, p);
59}
60
/* Current owner task of the regular mutex, for the common ww code below. */
static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}
66
/*
 * True when the mutex has the WAITERS flag set in its owner word, i.e.
 * somebody is (or is about to be) blocked on the wait-list.
 */
static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}
72
/* Take the mutex's internal wait_lock protecting its wait-list. */
static inline void lock_wait_lock(struct mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);
}
77
/* Release the mutex's internal wait_lock. */
static inline void unlock_wait_lock(struct mutex *lock)
{
	raw_spin_unlock(&lock->wait_lock);
}
82
/* Lockdep assertion that the caller holds the mutex's wait_lock. */
static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}
87
88#else
89
90#define MUTEX rt_mutex
91#define MUTEX_WAITER rt_mutex_waiter
92
93static inline struct rt_mutex_waiter *
94__ww_waiter_first(struct rt_mutex *lock)
95{
96 struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
97 if (!n)
98 return NULL;
99 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
100}
101
102static inline struct rt_mutex_waiter *
103__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
104{
105 struct rb_node *n = rb_next(&w->tree_entry);
106 if (!n)
107 return NULL;
108 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
109}
110
111static inline struct rt_mutex_waiter *
112__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
113{
114 struct rb_node *n = rb_prev(&w->tree_entry);
115 if (!n)
116 return NULL;
117 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
118}
119
120static inline struct rt_mutex_waiter *
121__ww_waiter_last(struct rt_mutex *lock)
122{
123 struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
124 if (!n)
125 return NULL;
126 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
127}
128
/*
 * Deliberate no-op for the rt_mutex backend: an rt_mutex keeps its waiters
 * in an rbtree (see the rb_* helpers above) and the insertion position is
 * determined by the rt_mutex enqueue code itself, not by a caller-chosen
 * @pos — presumably ordered by priority; the enqueue path is not visible
 * in this file.
 */
static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* intentionally empty */
}
134
/* Current owner task of the underlying rt_mutex. */
static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}
140
/* True when the underlying rt_mutex has blocked waiters. */
static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}
146
/* Take the rt_mutex's internal wait_lock. */
static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}
151
/* Release the rt_mutex's internal wait_lock. */
static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}
156
/* Lockdep assertion that the caller holds the rt_mutex's wait_lock. */
static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}
161
162#endif
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
/*
 * Associate the ww_mutex @ww with the acquire context @ww_ctx under which
 * it was just locked, and bump the context's count of held locks.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * The lock should not already carry a context: if this triggers,
	 * it was locked through the ww API but never released through
	 * __ww_mutex_unlock() (which clears ww->ctx).
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Acquiring more locks after ww_acquire_done() was called on this
	 * context? That marker is supposed to end the acquire phase.
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After a -EDEADLK back-off, the retry must start with the
		 * lock that caused the contention — not a different one.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * After -EDEADLK everything else must have been released
		 * first; holding locks here means the back-off protocol
		 * was not followed.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Mixing classes within one acquire context is a usage error.
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}
220
221
222
223
224
225
226
/*
 * Determine whether context @a is "less" (less important) than @b: it
 * either belongs to a lower-priority task or, at equal priority, is the
 * younger (later-stamped) transaction. The less important context is the
 * one that waits or backs off.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	/*
	 * On the rt_mutex backend, scheduler priority takes precedence
	 * over the transaction stamp.
	 */
#ifdef WW_RT
	/* NOTE: smaller ->prio value means higher priority (kernel convention) */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_prio(a_prio) || rt_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static priority: break ties on deadline for DL tasks */

		if (dl_prio(a_prio)) {
			/* the later absolute deadline is the lesser context */
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio and deadline: fall through to stamp order */
	}
#endif

	/* the larger (later, i.e. younger) stamp is the lesser context */
	return (signed long)(a->stamp - b->stamp) > 0;
}
267
268
269
270
271
272
273
274
275
/*
 * Wait-Die only: wake @waiter when it is a lesser context that holds other
 * locks, so it can notice and back off (die).
 *
 * Returns true when the Wait-Die algorithm is in effect for @ww_ctx (the
 * caller stops scanning further waiters), false otherwise.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	/*
	 * Only a waiter that already holds locks (acquired > 0) and is less
	 * important than us needs to die; a lock-less waiter can simply
	 * keep waiting.
	 */
	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_up_process(waiter->task);
	}

	return true;
}
292
293
294
295
296
297
298
299
/*
 * Wound-Wait: wound the lesser context @hold_ctx if it currently holds the
 * lock, so that a more important transaction (@ww_ctx) can make progress.
 * A wounded holder backs off on its next lock attempt (see
 * __ww_mutex_check_kill()).
 *
 * Returns true when the holder was wounded, false otherwise.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * The lock may have no associated context at all (e.g. it was taken
	 * without a ww_acquire_ctx); nothing to wound then.
	 */
	if (!hold_ctx)
		return false;

	/*
	 * No owner means nobody to wake. NOTE(review): presumably a
	 * hold_ctx with no owner can happen transiently around the unlock
	 * path — confirm against the mutex slowpath.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * Wake the owner so it re-checks its wounded state; waking
		 * ourselves (owner == current) would be pointless. The
		 * wake_up_process() is expected to provide the ordering
		 * needed for the owner to observe ->wounded — TODO confirm
		 * against the scheduler's wakeup barrier guarantees.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}
341
342
343
344
345
346
347
348
349
350
351
352
353
/*
 * We just acquired @lock under @ww_ctx: walk the wait-list and, for each
 * waiter that has a context, either wake it so it can die (Wait-Die) or
 * wound the relevant holder context (Wound-Wait). The scan stops at the
 * first waiter for which either helper reports it acted.
 *
 * Caller must hold the wait_lock (asserted below).
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		/* waiters without a ww context don't participate */
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}
372
373
374
375
376
/*
 * After acquiring @lock through the fastpath (i.e. without ever holding
 * the wait_lock), publish @ctx and, if we raced with new waiters, run the
 * die/wound checks on them.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * Order the ww->ctx store (done in ww_mutex_lock_acquired() above)
	 * before the MUTEX_FLAG_WAITERS read below; otherwise a concurrent
	 * waiter could be missed. This barrier presumably pairs with one on
	 * the waiter-add side (see the smp_mb() in __ww_mutex_add_waiter()),
	 * so that at least one side observes the other's store.
	 */
	smp_mb();

	/*
	 * No waiters observed after publishing our context: nothing can
	 * need to die or be wounded, done.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * We raced in the fastpath: take the wait_lock and check whether
	 * any of the queued waiters must die or wound us.
	 */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}
411
/*
 * Back-off check for @ww_ctx on @lock: return -EDEADLK when the context
 * already holds locks (acquired > 0) and therefore must release everything
 * and retry from scratch; return 0 when it is safe to simply keep waiting.
 * With debugging on, remember the contended lock so the retry can be
 * validated in ww_mutex_lock_acquired().
 */
static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
428
429
430
431
432
433
434
435
436
437
438
439
/*
 * Check whether the current lock attempt by @ctx must back off.
 *
 * Wound-Wait: if we have been wounded, kill ourselves.
 *
 * Wait-Die: if the lock is held by, or queued for, a more important
 * context, kill ourselves.
 *
 * Returns 0 to keep waiting, or -EDEADLK (via __ww_mutex_kill()) to
 * back off.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	/* holding no other locks, we can never deadlock: just wait */
	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		/* Wound-Wait: back off only if somebody wounded us */
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	/* Wait-Die: die when the holder is more important than us */
	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * Any waiter queued in front of us that has a context is more
	 * important than us (the wait-list is kept ordered by
	 * __ww_mutex_add_waiter()), so we must die.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}
476
477
478
479
480
481
482
483
484
485
486
487
/*
 * Add @waiter to the wait-list such that contexts stay ordered with the
 * most important (see __ww_ctx_less()) first; waiters without a context
 * are left where they are.
 *
 * Wait-Die: die immediately when a more important context is already
 * queued (no point waiting behind it), and wake lesser queued contexts so
 * they can die.
 *
 * Wound-Wait: after queueing, wound the current holder if it is less
 * important than us.
 *
 * Returns 0 on success or -EDEADLK when the caller must back off.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	/* context-less waiters simply queue at the default position */
	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Scan backwards from the tail for our insertion point: we belong
	 * in front of the first queued context that is more important
	 * than us.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: queueing behind a more important
			 * context is futile — we would have to die the
			 * moment it takes the lock — so back off now
			 * (only possible when we hold locks ourselves).
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: wake lesser queued contexts so they can die */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if the lock is currently held by a lesser context,
	 * wound it so we may proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * Order our enqueue above against the ww->ctx read below —
		 * presumably pairing with the smp_mb() in
		 * ww_mutex_set_context_fastpath(), so that either we see the
		 * new holder's context or the holder sees us queued.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
558
/*
 * Disassociate @lock from its acquire context on unlock: drop the
 * context's held-lock count and clear the lock's ctx back-pointer set by
 * ww_mutex_lock_acquired(). A plain (context-less) lock has NULL ctx and
 * needs nothing.
 */
static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
		/* unlocking with acquired == 0 indicates unbalanced usage */
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		/* guard the decrement so a broken count cannot wrap negative */
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}
570