#include <drm/drmP.h>

#include "radeon.h"
#include <drm/radeon_drm.h>
#include <linux/dma-buf.h>

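/*
 * Wrap the pages backing the TTM buffer object in a scatter/gather
 * table so the buffer can be handed to another device via PRIME.
 */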
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

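/*
 * Map the whole buffer object into the kernel's address space and
 * return the virtual address, or an ERR_PTR on failure.
 */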
void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

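/*
 * Tear down the kernel mapping created by radeon_gem_prime_vmap().
 */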
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

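/*
 * Create a GTT-domain buffer object backed by an imported sg_table.
 * The BO reuses the exporter's reservation object, is added to the
 * device's list of GEM objects and starts out marked as prime shared.
 */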
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}

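/*
 * Pin the buffer into GTT for sharing and bump the prime shared count.
 */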
int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	radeon_bo_unreserve(bo);
	return ret;
}

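/*
 * Drop the pin taken by radeon_gem_prime_pin() and decrement the
 * prime shared count.
 */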
void radeon_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return;

	radeon_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	radeon_bo_unreserve(bo);
}

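/*
 * Return the reservation object that synchronizes access to the
 * buffer's backing storage.
 */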
struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);

	return bo->tbo.resv;
}

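/*
 * Export a GEM object as a dma-buf.  Userptr BOs wrap user memory and
 * are not allowed to be exported, so they are rejected with -EPERM.
 */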
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(gobj);

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}