#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	/* Use the full allocation; ksize() may round up past shared_max */
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
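
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers
 * typically embed the reservation object in their buffer object and pair
 * dma_resv_init() with dma_resv_fini() in its constructor and destructor:
 *
 *	struct my_bo {
 *		struct dma_resv resv;
 *	};
 *
 *	dma_resv_init(&bo->resv);
 *	...
 *	dma_resv_fini(&bo->resv);
 */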

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must be called with
 * @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * No need to bump the fence refcounts here, we only move the
	 * pointers over to the new list under obj->lock. Unsignaled
	 * fences are packed at the start of the new list, signaled
	 * fences at the end so they can be released below.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the RCU read-side lock.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
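
/*
 * Usage sketch (hypothetical driver code, not part of this file): a slot is
 * reserved under the reservation lock and then consumed by
 * dma_resv_add_shared_fence() before the lock is dropped:
 *
 *	dma_resv_lock(&bo->resv, NULL);
 *	ret = dma_resv_reserve_shared(&bo->resv, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(&bo->resv, fence);
 *	dma_resv_unlock(&bo->resv);
 */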

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		/* Reuse a slot holding a fence from the same context,
		 * or one whose fence has already signaled.
		 */
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with
 * dma_resv_lock(). Note that this function replaces all fences attached to
 * @obj.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
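
/*
 * Usage sketch (hypothetical driver code, not part of this file): a write job
 * attaches its completion fence as the exclusive fence, which also drops all
 * shared fences:
 *
 *	dma_resv_lock(&bo->resv, NULL);
 *	dma_resv_add_excl_fence(&bo->resv, job_fence);
 *	dma_resv_unlock(&bo->resv);
 */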

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = dma_resv_shared_list(src);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		/* The list may have grown while we allocated, retry if so */
		rcu_read_lock();
		src_list = dma_resv_shared_list(src);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence __rcu **dst;
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = dma_resv_shared_list(src);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			dst = &dst_list->shared[dst_list->shared_count++];
			rcu_assign_pointer(*dst, fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_shared_list(dst);
	old = dma_resv_excl_fence(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = dma_resv_excl_fence(obj);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = dma_resv_shared_list(obj);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				/*
				 * Retry the allocation outside of the RCU
				 * read side critical section, where we are
				 * allowed to sleep.
				 */
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
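
/*
 * Usage sketch (hypothetical driver code, not part of this file): on success
 * the caller owns one reference to each returned fence and must free the
 * krealloc'd array:
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *
 *	if (!dma_resv_get_fences(&bo->resv, &excl, &count, &shared)) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);
 *	}
 */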

/**
 * dma_resv_wait_timeout - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned int seq, shared_count;
	struct dma_fence *fence;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = dma_resv_excl_fence(obj);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence;

			lfence = rcu_dereference(fobj->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
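
/*
 * Usage sketch (hypothetical driver code, not part of this file): wait
 * interruptibly for all fences with a 100 ms timeout:
 *
 *	long ret = dma_resv_wait_timeout(&bo->resv, true, true,
 *					 msecs_to_jiffies(100));
 *	if (ret == 0)
 *		ret = -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */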

/*
 * Check whether @passed_fence has signaled, grabbing a reference when it is
 * still in the process of doing so. Returns 1 if signaled, 0 if not, or -1
 * if the fence is about to be freed and the caller should retry.
 */
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_fence *fence;
	unsigned int seq;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
		unsigned int i, shared_count;

		shared_count = fobj ? fobj->shared_count : 0;
		for (i = 0; i < shared_count; ++i) {
			fence = rcu_dereference(fobj->shared[i]);
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}
	}

	fence = dma_resv_excl_fence(obj);
	if (ret && fence) {
		ret = dma_resv_test_signaled_single(fence);
		if (ret < 0)
			goto retry;
	}

	if (read_seqcount_retry(&obj->seq, seq))
		goto retry;

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
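
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * non-blocking busy check over all fences:
 *
 *	bool busy = !dma_resv_test_signaled(&bo->resv, true);
 */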

#if IS_ENABLED(CONFIG_LOCKDEP)
/*
 * Prime lockdep once at boot with the locking rules around dma_resv:
 * taking the reservation lock and waiting on fences must nest correctly
 * against memory reclaim, the mmap lock and the mmu notifier machinery.
 */
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif