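/*
 * Base- and reference-object implementation for the lightweight objects
 * managed by the TTM object machinery: base objects registered in a
 * per-device idr, and per-file reference objects (struct ttm_ref_object)
 * kept in hash tables for fast lookup. Also implements the prime/dma-buf
 * export and import helpers built on top of the base objects.
 */
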
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include "ttm_object.h"

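/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 * @lock: Lock that protects @ref_list and the @ref_hash hash tables.
 * @ref_list: List of ttm_ref_objects to be destroyed at file release.
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, for fast
 * lookup of ref objects given a base object handle.
 * @refcount: Reference count of the file object itself.
 */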
struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: Protects handle allocation and removal in @idr.
 * @object_hash: Hash table set up at device init time.
 * @object_count: Count of registered objects.
 * @mem_glob: Memory accounting object used for ref objects and dma-bufs.
 * @ops: dma_buf_ops for dma-bufs exported from this device; a copy of the
 * driver-supplied ops with the release method wrapped by TTM.
 * @dmabuf_release: The driver's original dma_buf release method.
 * @dma_buf_size: Accounted size of an exported dma-buf.
 * @idr: Maps object handles to base objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */
struct ttm_object_device {
        spinlock_t object_lock;
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
        size_t dma_buf_size;
        struct idr idr;
};

/**
 * struct ttm_ref_object
 *
 * @rcu_head: RCU head used to free the ref object with kfree_rcu().
 * @hash: Hash entry for the per-file ref object hash tables.
 * @head: List entry for the per-file list of ref objects.
 * @kref: Reference count of the ref object itself.
 * @ref_type: Type of this reference.
 * @obj: Base object this ref object is referencing.
 * @tfile: The ttm_object_file this ref object belongs to.
 *
 * A ref object holds a single reference of a given type on a base object
 * on behalf of a ttm_object_file. The hash tables allow fast lookup of the
 * ref object given the base object handle, so a process referencing the
 * same base object multiple times reuses one ref object and only bumps
 * its @kref. All ref objects held by a file are released in
 * ttm_object_file_release().
 */
struct ttm_ref_object {
        struct rcu_head rcu_head;
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
        enum ttm_ref_type ref_type;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
        kref_get(&tfile->refcount);
        return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
        struct ttm_object_file *tfile =
                container_of(kref, struct ttm_object_file, refcount);

        kfree(tfile);
}

static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        kref_put(&tfile->refcount, ttm_object_file_destroy);
}

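/**
 * ttm_base_object_init - Initialize and register a base object
 *
 * @tfile: The struct ttm_object_file the object is registered with.
 * @base: The caller-embedded base object to initialize.
 * @shareable: Whether other files may reference the object.
 * @object_type: The object type.
 * @refcount_release: Called when the last reference on @base is dropped.
 * @ref_obj_release: Called when a non-USAGE ref object on @base is released.
 *
 * Allocates a handle for the object in the device idr and adds a
 * TTM_REF_USAGE reference for the calling file. On success the caller's
 * initial reference is transferred to that ref object, so the object is
 * destroyed when the handle is released.
 */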
int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
        ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;

        base->handle = ret;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);
        return ret;
}

static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
                container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        idr_remove(&tdev->idr, base->handle);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: RCU readers may still hold a pointer to the object even
         * after the handle has been removed from the idr above, so the
         * backing memory must be freed in an RCU-safe manner by the
         * refcount_release callback.
         */
        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without taking a
 * reference
 *
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * Looks up a base object through the caller's TTM_REF_USAGE hash table
 * without incrementing the object's refcount. On success the function
 * returns with the RCU read-side critical section still held (the
 * __release(RCU) below only balances the sparse annotations), so the
 * returned pointer is only valid until the caller ends the lookup with
 * ttm_base_object_noref_release().
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);
        if (ret) {
                rcu_read_unlock();
                return NULL;
        }

        __release(RCU);
        return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);

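/**
 * ttm_base_object_lookup - look up a base object and take a reference
 *
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * Looks up the object through the caller's TTM_REF_USAGE hash table and
 * takes a reference on it unless it is already being destroyed.
 *
 * Return: A refcounted pointer to the object, or NULL if it was not found
 * or is already doomed.
 */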
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
        rcu_read_unlock();

        return base;
}

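/**
 * ttm_base_object_lookup_for_ref - look up a base object device-wide
 *
 * @tdev: The struct ttm_object_device the object is registered with.
 * @key: The object handle.
 *
 * Looks up the object directly in the device idr, regardless of which
 * file registered it, and takes a reference unless the object is already
 * being destroyed. Unlike ttm_base_object_lookup(), this does not require
 * the caller to already hold a ref object on the handle.
 */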
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
        struct ttm_base_object *base;

        rcu_read_lock();
        base = idr_find(&tdev->idr, key);

        if (base && !kref_get_unless_zero(&base->refcount))
                base = NULL;
        rcu_read_unlock();

        return base;
}

/**
 * ttm_ref_object_exists - Check whether a caller has a valid ref object
 * (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct ttm_base_object.
 *
 * Checks whether the caller holds a live TTM_REF_USAGE reference on the
 * base object identified by @base, without taking any new references.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                           struct ttm_base_object *base)
{
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        struct drm_hash_item *hash;
        struct ttm_ref_object *ref;

        rcu_read_lock();
        if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
                goto out_false;

        /*
         * Verify that the ref object is really pointing to our base
         * object. Our base object could actually be dead, and the ref
         * object pointing to another base object with the same handle.
         */
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        if (unlikely(base != ref->obj))
                goto out_false;

        /*
         * Verify that the ref->obj pointer was actually valid!
         */
        rmb();
        if (unlikely(kref_read(&ref->kref) == 0))
                goto out_false;

        rcu_read_unlock();
        return true;

 out_false:
        rcu_read_unlock();
        return false;
}

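/**
 * ttm_ref_object_add - Add a ref object to a base object
 *
 * @tfile: The struct ttm_object_file the ref object belongs to.
 * @base: The base object to reference.
 * @ref_type: The type of reference.
 * @existed: Optionally returns whether the caller already held a ref
 * object of this type on @base.
 * @require_existed: Fail with -EPERM instead of creating a new ref object
 * if none exists yet.
 *
 * If the caller already holds a ref object of @ref_type on @base, its
 * refcount is bumped; otherwise a new ref object is allocated, accounted
 * against the memory global, hashed on the object handle and added to the
 * file's ref list, taking a reference on the base object. The lockless
 * lookup and the insertion under tfile->lock are retried until one of
 * them succeeds.
 */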
int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       enum ttm_ref_type ref_type, bool *existed,
                       bool require_existed)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int ret = -EINVAL;

        if (base->tfile != tfile && !base->shareable)
                return -EPERM;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                rcu_read_lock();
                ret = drm_ht_find_item_rcu(ht, base->handle, &hash);

                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
                        }
                }

                rcu_read_unlock();
                if (require_existed)
                        return -EPERM;

                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           &ctx);
                if (unlikely(ret != 0))
                        return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
                        ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }

                ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
                kref_init(&ref->kref);

                spin_lock(&tfile->lock);
                ret = drm_ht_insert_item_rcu(ht, &ref->hash);

                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
                        spin_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }

                spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);

                ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }

        return ret;
}

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
                container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
}

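/**
 * ttm_ref_object_base_unref - Drop a ref object reference by handle
 *
 * @tfile: The struct ttm_object_file holding the reference.
 * @key: Handle of the referenced base object.
 * @ref_type: Type of the reference to drop.
 *
 * Drops one reference on the ref object identified by @key and @ref_type.
 * When the last reference is dropped, the ref object is unhashed, removed
 * from the file's ref list and freed, releasing its reference on the base
 * object.
 *
 * Return: 0 on success, -EINVAL if no such ref object exists.
 */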
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key, enum ttm_ref_type ref_type)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        int ret;

        spin_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
}

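/**
 * ttm_object_file_release - Release all ref objects of a file
 *
 * @p_tfile: Pointer to the struct ttm_object_file pointer; set to NULL.
 *
 * Releases every ref object still held by the file, tears down the ref
 * hash tables and drops the file's own reference. Called when the file
 * is closed.
 */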
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since ttm_ref_object_release() drops and re-takes tfile->lock,
         * restart the walk from the head of the list on each iteration.
         */
        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                ttm_ref_object_release(&ref->kref);
        }

        spin_unlock(&tfile->lock);
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        ttm_object_file_unref(&tfile);
}

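/**
 * ttm_object_file_init - Initialize a struct ttm_object_file
 *
 * @tdev: The struct ttm_object_device the file is associated with.
 * @hash_order: Order of the ref object hash tables.
 *
 * Allocates and initializes a ttm_object_file, including one ref object
 * hash table per ttm_ref_type.
 *
 * Return: The new ttm_object_file, or NULL on failure.
 */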
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                                             unsigned int hash_order)
{
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        unsigned int i;
        unsigned int j = 0;
        int ret;

        if (unlikely(tfile == NULL))
                return NULL;

        spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);

        for (i = 0; i < TTM_REF_NUM; ++i) {
                ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
                if (ret) {
                        j = i;
                        goto out_err;
                }
        }

        return tfile;
out_err:
        for (i = 0; i < j; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        kfree(tfile);

        return NULL;
}

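/**
 * ttm_object_device_init - Initialize a struct ttm_object_device
 *
 * @mem_glob: Memory accounting object.
 * @hash_order: Order of the device object hash table.
 * @ops: dma_buf_ops to use for exported dma-bufs; the release method is
 * wrapped so TTM can clean up its bookkeeping before calling the driver's
 * release.
 *
 * Return: The new ttm_object_device, or NULL on failure.
 */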
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
                       unsigned int hash_order,
                       const struct dma_buf_ops *ops)
{
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        int ret;

        if (unlikely(tdev == NULL))
                return NULL;

        tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);
        if (ret != 0)
                goto out_no_object_hash;

        idr_init(&tdev->idr);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
        tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
                ttm_round_pot(sizeof(struct file));
        return tdev;

out_no_object_hash:
        kfree(tdev);
        return NULL;
}

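/**
 * ttm_object_device_release - Tear down a struct ttm_object_device
 *
 * @p_tdev: Pointer to the struct ttm_object_device pointer; set to NULL.
 *
 * Destroys the handle idr (which must be empty by now) and the object
 * hash table, then frees the device structure.
 */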
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
        struct ttm_object_device *tdev = *p_tdev;

        *p_tdev = NULL;

        WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
        idr_destroy(&tdev->idr);
        drm_ht_remove(&tdev->object_hash);

        kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
        return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to the ttm_base_object pointer.
 *
 * Cleans up the prime object (which must no longer have a dma_buf
 * attached) and then calls the refcount_release method of the underlying
 * driver object, if any.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct ttm_prime_object *prime;

        *p_base = NULL;
        prime = container_of(base, struct ttm_prime_object, base);
        BUG_ON(prime->dma_buf != NULL);
        mutex_destroy(&prime->mutex);
        if (prime->refcount_release)
                prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The dma-buf being released.
 *
 * Calls the driver's original dma_buf release method, clears the cached
 * dma_buf pointer in the prime object, releases the memory accounted for
 * the dma-buf and drops the reference the dma-buf held on the base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
        struct ttm_prime_object *prime =
                (struct ttm_prime_object *) dma_buf->priv;
        struct ttm_base_object *base = &prime->base;
        struct ttm_object_device *tdev = base->tfile->tdev;

        if (tdev->dmabuf_release)
                tdev->dmabuf_release(dma_buf);
        mutex_lock(&prime->mutex);
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
        ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
        ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * Returns a handle to an object that previously exported a dma-buf from
 * this device, adding a TTM_REF_USAGE reference for the caller. dma-bufs
 * exported by other devices are not supported and fail with -ENOSYS.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
                           int fd, u32 *handle)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        struct ttm_base_object *base;
        int ret;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &tdev->ops) {
                /* Don't leak the reference taken by dma_buf_get(). */
                dma_buf_put(dma_buf);
                return -ENOSYS;
        }

        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->handle;
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

        dma_buf_put(dma_buf);

        return ret;
}

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 * Exports the prime object behind @handle as a dma-buf, reusing an
 * already-exported dma-buf if one is still alive.
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
                           uint32_t handle, uint32_t flags,
                           int *prime_fd)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct dma_buf *dma_buf;
        struct ttm_prime_object *prime;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL ||
                     base->object_type != ttm_prime_type)) {
                ret = -ENOENT;
                goto out_unref;
        }

        prime = container_of(base, struct ttm_prime_object, base);
        if (unlikely(!base->shareable)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = mutex_lock_interruptible(&prime->mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_unref;
        }

        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
                struct ttm_operation_ctx ctx = {
                        .interruptible = true,
                        .no_wait_gpu = false
                };
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;

                /*
                 * Need to create a new dma_buf, with memory accounting.
                 */
                ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
                                           &ctx);
                if (unlikely(ret != 0)) {
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
                        ttm_mem_global_free(tdev->mem_glob,
                                            tdev->dma_buf_size);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }

                /*
                 * The dma_buf now owns the base object reference taken by
                 * the lookup above; clear @base so out_unref doesn't drop
                 * it again.
                 */
                base = NULL;
                prime->dma_buf = dma_buf;
        }
        mutex_unlock(&prime->mutex);

        ret = dma_buf_fd(dma_buf, flags);
        if (ret >= 0) {
                *prime_fd = ret;
                ret = 0;
        } else
                dma_buf_put(dma_buf);

out_unref:
        if (base)
                ttm_base_object_unref(&base);
        return ret;
}

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma-bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
                          struct ttm_prime_object *prime, bool shareable,
                          enum ttm_object_type type,
                          void (*refcount_release) (struct ttm_base_object **),
                          void (*ref_obj_release) (struct ttm_base_object *,
                                                   enum ttm_ref_type ref_type))
{
        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
        prime->dma_buf = NULL;
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
                                    ttm_prime_refcount_release,
                                    ref_obj_release);
}