#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
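
/*
 * This file implements DMA-buf sharing for amdgpu: exporting GEM BOs to
 * other drivers/devices (attach, pin, map, CPU access) and importing
 * foreign DMA-bufs as SG BOs, including dynamic (movable) attachments
 * with peer-to-peer and XGMI support.
 */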

/**
 * amdgpu_gem_prime_vmap - &drm_driver.gem_prime_vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &drm_driver.gem_prime_vunmap implementation
 * @obj: GEM BO
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the BO's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: virtual memory area to map the BO into
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Userptr BOs and BOs created without CPU access can't be mapped. */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	/* Nothing to do if there are no shared fences to convert. */
	if (!dma_resv_get_list(obj))
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected, nothing to convert. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the dmabuf rely on exclusive fences for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, we need to convert all existing
	 * fences on the reservation object into a single exclusive
	 * fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r) {
		/* Don't leak the reservation on the error path. */
		amdgpu_bo_unreserve(bo);
		return r;
	}

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;
}

/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more,
 * which is needed for importers that can't handle dynamic (movable) buffers.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * If necessary, the BO is moved to a domain the attachment can map (GTT, or
 * VRAM for peer-to-peer capable attachments) and an sg_table with the DMA
 * addresses is created.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * Tears down the DMA mapping created by amdgpu_dma_buf_map().
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (sgt->sgl->page_link) {
		/* sg_table backed by system pages (GTT placement) */
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		/* sg_table created by the VRAM manager */
		amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
	}
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
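
/*
 * amdgpu_gem_prime_export() below installs this table as the dma_buf_ops of
 * every DMA-buf exported from amdgpu, replacing the default ops set up by
 * drm_gem_prime_export(). That way attach/pin/map can apply amdgpu specific
 * placement rules (GTT vs. peer-to-peer VRAM) to shared BOs.
 */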

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	/* Userptr BOs and per-VM BOs can't be exported. */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj;
	int ret;

	dma_resv_lock(resv, NULL);
	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU,
				       0, ttm_bo_type_sg, resv, &gobj);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	/* Imported BOs can only ever be mapped through the GTT domain. */
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &dma_buf_attach_ops.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that the mapping is
 * re-created before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		return;

	/* Evict the BO so that the next use re-creates the mapping. */
	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables
			 * so we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic, somebody else is
			 * holding the VM lock and updating page tables but we
			 * don't hold the ticket.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};
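
/*
 * Setting .allow_peer2peer and .move_notify makes amdgpu imports dynamic
 * attachments: the exporter may move the backing BO at any time and calls
 * move_notify so the stale mapping and VM page tables are invalidated and
 * re-created on the next use.
 */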

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver, potentially creating a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a DMA-buf exported from our own GEM
			 * increases the refcount on the GEM itself instead of
			 * the f_count of the DMA-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the DMA-buf is accessible over XGMI, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}