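/*
 * DMA-buf (PRIME) support for amdgpu: exporting and importing GEM buffer
 * objects, dynamic attachment handling, pinning and SG table mapping.
 */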
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
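/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */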
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
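/*
 * Collapse all fences on the reservation object into a single
 * exclusive fence.
 */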
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Nothing to do, no fences to wait for */
	} else if (count == 1) {
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		dma_resv_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
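/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */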
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * We only create shared fences for internal use, but importers
	 * of the DMA-buf rely on the exclusive fence for implicitly
	 * tracking write hazards. As any of the current fences may
	 * correspond to a write, convert all existing fences on the
	 * reservation object into a single exclusive fence.
	 */
	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
	if (r) {
		amdgpu_bo_unreserve(bo);
		return r;
	}

	bo->prime_shared_count++;
	amdgpu_bo_unreserve(bo);
	return 0;
}
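/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: the attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf.
 */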
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
}
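/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */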
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	/* pin buffer into GTT */
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}
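/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */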
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}
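/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * The buffer is validated into the GTT domain, or into VRAM when the importer
 * supports peer-to-peer access, and an sg_table with the DMA addresses is
 * created for the attached device.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */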
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}
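/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. Unmaps and frees the sg_table created in amdgpu_dma_buf_map().
 */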
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (sgt->sgl->page_link) {
		/* page backed mapping of a GTT buffer */
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		/* sg_table describing VRAM, created by the VRAM manager */
		amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
	}
}
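/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */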
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to GTT */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
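/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */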
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}
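/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */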
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_bo *bo;
	struct drm_gem_object *gobj;
	int ret;

	dma_resv_lock(resv, NULL);
	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU,
				       0, ttm_bo_type_sg, resv, &gobj);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}
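/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that the mapping is
 * re-created before the next use.
 */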
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;

		if (ticket) {
			/* An error here means that somebody else is holding
			 * the VM lock and updating the page tables already,
			 * so we can safely skip this VM.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* Without an acquire context we cannot wait for the
			 * reservation without risking deadlocks, so skip the
			 * VM if its lock cannot be taken immediately.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};
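/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver using the proper attachment ops.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */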
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a DMA-buf exported from our own GEM
			 * increases the refcount on the GEM itself instead of
			 * the f_count of the DMA-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}
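/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the DMA-buf is accessible over XGMI, false otherwise.
 */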
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}