#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

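/*
 * dma-buf map callback: duplicate the object's sg_table so that each
 * importer gets an independent mapping, then DMA-map the copy for the
 * attached device. The backing pages are pinned on success.
 */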
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}

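/*
 * dma-buf unmap callback: DMA-unmap and free the private sg_table copy
 * handed out by i915_gem_map_dma_buf.
 */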
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

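/*
 * dma-buf release callback: drop the GEM reference taken at export time,
 * provided the object still refers to this dma-buf.
 */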
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference on the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}

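/*
 * dma-buf vmap callback: build a page array from the object's sg list and
 * map it into the kernel's vmalloc space. The mapping is refcounted via
 * vmapping_count, so nested vmap/vunmap calls share a single mapping, and
 * the backing pages stay pinned while a mapping exists.
 */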
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto error;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

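/*
 * dma-buf vunmap callback: drop one reference on the shared kernel
 * mapping, and tear it down and unpin the pages when the count reaches
 * zero.
 */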
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

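/*
 * Per-page CPU access through kmap is not implemented for i915 dma-bufs.
 * The stubs below exist because the dma-buf core of this era expects the
 * kmap/kmap_atomic ops to be provided; they simply report no mapping.
 */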
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

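/*
 * Likewise, mmap of the exported buffer into userspace is not supported;
 * returning -EINVAL rejects any such attempt.
 */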
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

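/*
 * begin_cpu_access callback: move the object to the CPU domain before the
 * importer touches it with the CPU. The access counts as a write when the
 * direction is DMA_TO_DEVICE or DMA_BIDIRECTIONAL.
 */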
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = i915_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

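/*
 * Driver hook for PRIME export: wrap the GEM object in a dma-buf backed
 * by the ops above. The DRM core invokes this (roughly, from the
 * DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl path) to turn a GEM handle into a
 * file descriptor that another device can import. A userspace sharing
 * flow looks approximately like:
 *
 *	struct drm_prime_handle args = { .handle = handle,
 *					 .flags = DRM_CLOEXEC };
 *	ioctl(i915_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * after which args.fd can be handed to another driver's
 * DRM_IOCTL_PRIME_FD_TO_HANDLE.
 */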
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}

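/*
 * Backing-storage ops for imported objects: instead of allocating pages
 * from shmem, get_pages maps the foreign dma-buf through the attachment
 * and adopts the returned sg_table; put_pages hands the mapping back.
 */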
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

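/*
 * Driver hook for PRIME import: if the dma-buf is one of our own exports,
 * short-circuit to the underlying GEM object; otherwise attach to the
 * foreign buffer and wrap it in a new GEM object whose pages come from
 * the dmabuf ops above.
 */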
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}