#include <linux/dma-buf.h>

#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

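/**
 * radeon_gem_prime_get_sg_table - build an sg table for a PRIME export
 * @obj: GEM BO being exported
 *
 * Wraps the pages backing the TTM buffer object in a scatter/gather table.
 */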
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

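/**
 * radeon_gem_prime_vmap - map the exported BO into kernel address space
 * @obj: GEM BO being exported
 *
 * Kmaps the whole buffer through TTM and caches the mapping in
 * bo->dma_buf_vmap. Returns the kernel virtual address or an ERR_PTR.
 */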
void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

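/**
 * radeon_gem_prime_vunmap - tear down the mapping created by radeon_gem_prime_vmap
 * @obj: GEM BO being exported
 * @vaddr: kernel virtual address returned by radeon_gem_prime_vmap (unused)
 */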
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

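/**
 * radeon_gem_prime_import_sg_table - create a BO around an imported sg table
 * @dev: DRM device
 * @attach: dma-buf attachment providing the backing pages
 * @sg: scatter/gather table of the imported pages
 *
 * Creates a GTT-domain radeon_bo that shares the exporter's reservation
 * object; the reservation lock is held across radeon_bo_create() because
 * the BO is initialized with that pre-existing resv. The new BO is added
 * to the device's GEM object list and marked as prime shared.
 */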
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}

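/**
 * radeon_gem_prime_pin - pin an exported BO for access by the importer
 * @obj: GEM BO being exported
 *
 * Pins the buffer into GTT so its pages stay resident while another
 * device uses them, and takes a prime_shared_count reference.
 */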
int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	radeon_bo_unreserve(bo);
	return ret;
}

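/**
 * radeon_gem_prime_unpin - drop the pin taken by radeon_gem_prime_pin
 * @obj: GEM BO being exported
 */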
void radeon_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return;

	radeon_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	radeon_bo_unreserve(bo);
}

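/**
 * radeon_gem_prime_res_obj - return the BO's reservation object
 * @obj: GEM BO being exported
 *
 * The returned reservation object is shared with the dma-buf so that
 * importer and exporter synchronize on the same fences.
 */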
struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);

	return bo->tbo.resv;
}

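/**
 * radeon_gem_prime_export - export a GEM BO as a dma-buf
 * @dev: DRM device
 * @gobj: GEM BO to export
 * @flags: dma-buf flags
 *
 * Userptr BOs are backed by pages owned by the creating process and must
 * not be exported; everything else is handled by drm_gem_prime_export().
 */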
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(gobj);

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return ERR_PTR(-EPERM);
	return drm_gem_prime_export(dev, gobj, flags);
}