#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
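
/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf. Disables peer-to-peer
 * access when the PCI topology doesn't support it and takes a runtime PM
 * reference while a foreign device is attached.
 */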
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
		attach->peer2peer = false;

	if (attach->dev->driver == adev->dev->driver)
		return 0;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto out;

	return 0;

out:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	return r;
}
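
/**
 * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
 *
 * @dmabuf: DMA-buf where we remove the attachment from
 * @attach: attachment to remove
 *
 * Called when an attachment is removed from the DMA-buf; drops the runtime PM
 * reference taken during attach.
 */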
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
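
/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */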
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int r;

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	/* wait for any pending move to finish before handing out the pinned BO */
	if (bo->tbo.moving) {
		r = dma_fence_wait(bo->tbo.moving, true);
		if (r) {
			amdgpu_bo_unpin(bo);
			return r;
		}
	}
	return 0;
}
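
/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */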
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}
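
/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 *
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * Unpinned BOs are validated into GTT (or additionally VRAM when peer-to-peer
 * access is possible), then an sg_table is built for the attaching device.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or an ERR_PTR on failure.
 */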
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);

	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
		     AMDGPU_GEM_DOMAIN_GTT)) {
		return ERR_PTR(-EBUSY);
	}

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}
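
/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 *
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * Tear down the DMA mapping created by amdgpu_dma_buf_map() and free the
 * sg_table.
 */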
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	if (sgt->sgl->page_link) {
		/* page backed sg_table, mapped with dma_map_sgtable() */
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		/* VRAM backed sg_table from amdgpu_vram_mgr_alloc_sgt() */
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}
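
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 *
 * @dma_buf: shared DMA buffer
 * @direction: direction of DMA transfer
 *
 * Called before the importer starts CPU access. For reads, try to move the BO
 * into GTT so the CPU sees coherent, up-to-date data.
 */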
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
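
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 *
 * @gobj: GEM BO
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * Export the GEM BO as a DMA-buf. Userptr BOs and per-VM BOs can't be shared,
 * so those return -EPERM. On success the DMA-buf ops are replaced with
 * &amdgpu_dmabuf_ops.
 *
 * Returns:
 * Shared DMA-buf representing the GEM BO from the given device.
 */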
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}
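
/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */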
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}
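
/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment: evict the imported BO and update the
 * page tables of all VMs mapping it, so the mapping is re-created before the
 * next use.
 */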
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.bo->tbo.base.resv;

		if (ticket) {
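			/* Use the caller's WW acquire ticket to lock the VM
			 * reservation as well; skip this VM if the lock can't
			 * be taken.
			 */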
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
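			/* Without a ticket we can only trylock the VM
			 * reservation to avoid deadlocks.
			 */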
			if (!dma_resv_trylock(resv))
				continue;
		}

		r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};
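
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 *
 * @dev: DRM device
 * @dma_buf: Shared DMA-buf
 *
 * Import a dma_buf into the driver, creating a new GEM object or reusing the
 * existing one when the buffer was exported from the same device.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */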
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
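			/*
			 * Importing a DMA-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the DMA-buf.
			 */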
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}
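
/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the dmabuf is accessible over xgmi, false otherwise.
 */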
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}