/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

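	/* Copy sg so that we make an independent mapping */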
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

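	/* If a kernel mapping already exists, just bump its use count. */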
	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

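	/* Map all backing pages into one contiguous kernel virtual range. */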
	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

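/*
 * Page-level kmap and direct CPU mmap of the dma-buf are not supported for
 * i915 objects, so the following callbacks are stubs.
 */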
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

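	/* Move the object to the CPU domain so subsequent CPU access is coherent. */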
	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

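	/* Give the object's backend a chance to prepare for, or veto, the export. */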
	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

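/*
 * For imported objects the backing pages come from the exporter: map the
 * attachment and use the resulting sg_table directly as obj->pages.
 */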
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

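	/* is this one of our own objects? */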
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}