#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
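
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: virtual memory area to map the BO into
 *
 * Sets up a userspace mapping of the BO's memory in the given virtual memory
 * area. Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are
 * rejected.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */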
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check that the requested mapping fits into the BO. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Userptr BOs and BOs without CPU access cannot be mapped. */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* Prime mmap does not need to check access rights, so allow it here. */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
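
/*
 * Collect all fences on @obj and install them as a single exclusive fence,
 * so that importers that only honour the exclusive fence wait for all of them.
 */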
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Nothing to do, any existing exclusive fence stays in place. */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
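
/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 * @dmabuf: DMA-buf to attach to
 * @attach: attachment to add
 *
 * Adds the attachment as user of the exported DMA-buf. Disables peer-to-peer
 * access if the attaching device cannot reach our VRAM over PCIe P2P, and
 * keeps the GPU powered while a foreign device is attached.
 */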
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto out;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto out;

	/*
	 * We only create shared fences for internal use, but importers of the
	 * DMA-buf rely on the exclusive fence for implicit tracking of write
	 * hazards. As any of the current fences may correspond to a write,
	 * convert all existing fences on the reservation object into a single
	 * exclusive fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r)
		goto out;

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;

out:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	return r;
}
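
/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 * @dmabuf: DMA-buf to detach from
 * @attach: attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf. Drops the shared
 * count taken in amdgpu_dma_buf_attach() for foreign devices and lets the GPU
 * runtime suspend again.
 */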
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
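
/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 * @attach: attachment to pin down
 *
 * Pins the BO backing the DMA-buf into GTT so that it cannot move anymore,
 * and waits for any pending move to finish.
 */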
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int r;

	/* Pin the buffer into GTT. */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	/* Make sure any pending move of the buffer has completed. */
	if (bo->tbo.moving) {
		r = dma_fence_wait(bo->tbo.moving, true);
		if (r) {
			amdgpu_bo_unpin(bo);
			return r;
		}
	}
	return 0;
}
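
/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 * @attach: attachment to unpin
 *
 * Unpins a previously pinned BO so that it can be moved again.
 */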
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}
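
/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * Unpinned BOs are moved to the GTT domain, with VRAM also allowed when the
 * attachment supports peer-to-peer access; the resulting placement is then
 * mapped into an sg_table for the importing device.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or an ERR_PTR with a negative
 * error code.
 */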
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* Move the buffer into GTT or VRAM. */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}
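
/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * Tears down the mapping created by amdgpu_dma_buf_map(), either by unmapping
 * and freeing a page-backed GTT sg_table or by releasing a VRAM one.
 */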
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	if (sgt->sgl->page_link) {
		/* Page-backed sg_table created from GTT pages. */
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		/* VRAM sg_table without backing pages. */
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}
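
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of the DMA transfer
 *
 * Called before CPU access to the shared DMA buffer's memory. For reads the
 * buffer is moved to the GTT domain if possible, for better CPU read
 * performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */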
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* Move the BO to GTT for CPU reads. */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
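
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: flags such as DRM_CLOEXEC and DRM_RDWR
 *
 * The main work is done by the &drm_gem_prime_export helper; the resulting
 * DMA-buf is then switched over to the amdgpu-specific &dma_buf_ops. Userptr
 * BOs and per-VM BOs cannot be exported.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */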
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}
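
/**
 * amdgpu_dma_buf_create_obj - create a BO for a DMA-buf import
 * @dev: DRM device
 * @dma_buf: DMA-buf to import
 *
 * Creates an empty SG BO that shares the reservation object of the DMA-buf
 * and will later be backed by the exporter's scatter/gather table.
 *
 * Returns:
 * A new GEM BO of the given DRM device, or an ERR_PTR on failure.
 */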
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}
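
/**
 * amdgpu_dma_buf_move_notify - &dma_buf_attach_ops.move_notify implementation
 * @attach: the DMA-buf attachment
 *
 * Invalidates the imported BO's backing store and the VM page tables
 * referencing it, so that the mapping is re-created before the next use.
 */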
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		return;

	/* Invalidate the BO's backing store by validating an empty placement. */
	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;

		if (ticket) {
			/*
			 * When we get an error here it means that somebody
			 * else is holding the VM lock and updating the page
			 * tables, so we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/*
			 * TODO: This is more problematic, somebody else is
			 * modifying the VM and we can't do anything.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

/* Importer attachment ops: peer-to-peer is allowed and moves are dynamic. */
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};
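
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: shared DMA buffer
 *
 * Imports a DMA-buf into a GEM object for the given device. Buffers exported
 * by the same device are simply reused; everything else gets a new SG BO and
 * a dynamic attachment to the exporter.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */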
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a DMA-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the DMA-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}
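
/**
 * amdgpu_dmabuf_is_xgmi_accessible - check if a DMA-buf is XGMI accessible
 * @adev: amdgpu device of the importer
 * @bo: buffer object to check
 *
 * Returns:
 * True if the exporting device is in the same XGMI hive as @adev and the BO
 * can be placed in VRAM, so the buffer can be accessed over XGMI instead of
 * PCIe.
 */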
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non-AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}