#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
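
/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the buffer object's memory.
 */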
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
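
/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the buffer object's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */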
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
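
/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the buffer object's memory.
 */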
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
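
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the buffer object's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */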
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
		return 0;

	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* Now that was unexpected. */
	} else if (count == 1) {
		reservation_object_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		reservation_object_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
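
/**
 * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For this it pins the buffer into GTT.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */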
static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
				     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * We only create shared fences for internal use, but importers
		 * of the dmabuf rely on exclusive fences for implicitly
		 * tracking write hazards. As any of the current fences may
		 * correspond to a write, we need to convert all existing
		 * fences on the reservation object into a single exclusive
		 * fence.
		 */
		r = __reservation_object_make_exclusive(bo->tbo.resv);
		if (r)
			goto error_unreserve;
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}
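
/**
 * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Called when the shared DMA buffer no longer needs to be accessible by the
 * other device. Unpins the buffer from GTT again.
 */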
static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
				      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}
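
/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object.
 */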
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}
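
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of DMA transfer
 *
 * Called before CPU access to the shared DMA buffer's memory. For read
 * accesses the buffer is moved to the GTT domain if possible, for better
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */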
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_map_attach,
	.detach = amdgpu_dma_buf_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
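
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * Refuses to export userptr BOs and per-VM BOs; otherwise delegates to the
 * &drm_gem_prime_export helper and installs &amdgpu_dmabuf_ops on the result.
 *
 * Returns:
 * Shared DMA buffer representing the GEM buffer object from the given device.
 */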
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}
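
/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: scatter/gather table
 *
 * Imports shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM buffer object of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */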
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}
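
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: shared DMA buffer
 *
 * Short-circuits imports of our own exported buffers; everything else is
 * delegated to the &drm_gem_prime_import helper.
 *
 * Returns:
 * A GEM buffer object representing the shared DMA buffer for the given device.
 */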
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}