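/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for sharing GEM buffer
 * objects between different devices via PRIME.
 */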
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
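
/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */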
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
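
/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM BO
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel CPU mapping of the BO.
 */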
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
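
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */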
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned long asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (!adev)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		return -EPERM;

	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
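
/*
 * Convert all fences on the reservation object into a single exclusive
 * fence. Importers rely on the exclusive fence for implicit write-hazard
 * tracking, while amdgpu only creates shared fences for internal use.
 */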
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		/* The array now owns the fences; put only our array reference. */
		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
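
/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */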
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r)
		goto out_unreserve;

	bo->prime_shared_count++;

out_unreserve:
	/* Drop the reservation on both the success and the error path. */
	amdgpu_bo_unreserve(bo);
	return r;
}
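
/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */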
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
}
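
/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR(-errno) on failure.
 */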
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct sg_table *sgt;
	long r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return ERR_PTR(r);

	sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
	if (IS_ERR(sgt))
		goto error_unpin;

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC))
		goto error_free;

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	sgt = ERR_PTR(-ENOMEM);
error_unpin:
	/* Don't leak the pin when building or mapping the sg_table fails. */
	amdgpu_bo_unpin(bo);
	return sgt;
}
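
/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */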
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
	amdgpu_bo_unpin(bo);
}
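
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */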
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.dynamic_mapping = true,
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
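
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */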
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}
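
/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * The new GEM BO or ERR_PTR(-errno) on failure.
 */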
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = dma_buf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	dma_resv_lock(resv, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return &bo->tbo.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}
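
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Imports a shared DMA buffer into a newly created GEM object.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */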
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}
459