#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
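
/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */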
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
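
/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM BO
 * @vaddr: Virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the BO's memory.
 */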
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
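
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */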
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

/* Convert all shared fences on @obj into a single exclusive fence. */
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
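
/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */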
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r) {
		/* Don't leak the reservation lock on the error path. */
		amdgpu_bo_unreserve(bo);
		return r;
	}

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;
}
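
/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */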
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
}
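
/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */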
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}
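
/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */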
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}
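
/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * Unpinned BOs are moved to the GTT domain (or VRAM for peer-to-peer capable
 * attachments) before their backing memory is turned into an sg_table.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or an ERR_PTR with a negative
 * error code.
 */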
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
					    bo->tbo.num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}
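
/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. Frees the sg_table allocated by amdgpu_dma_buf_map().
 */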
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (sgt->sgl->page_link) {
		/* struct pages are present only for system memory mappings */
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
	}
}
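
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for the
 * best CPU and GPU coherency.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */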
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
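
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */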
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}
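
/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */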
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = dma_buf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	dma_resv_lock(resv, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return &bo->tbo.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}
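
/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */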
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables
			 * So we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* Without a reservation ticket we can only trylock to
			 * avoid deadlocks; if somebody else holds the lock the
			 * page tables will be updated on the next command
			 * submission instead.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};
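
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given DRM device.
 */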
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a DMA-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the DMA-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}