#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
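
/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object (BO)
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the BO's memory.
 */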
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
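
/**
 * amdgpu_gem_prime_vmap - map the buffer object into kernel address space
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the BO's memory via ttm_bo_kmap().
 *
 * Returns:
 * The kernel virtual address of the mapping, or an ERR_PTR() on failure.
 */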
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
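
/**
 * amdgpu_gem_prime_vunmap - unmap the buffer object from kernel address space
 * @obj: GEM buffer object
 * @vaddr: Kernel virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping set up by amdgpu_gem_prime_vmap().
 */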
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
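
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given virtual memory
 * area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */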
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
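
/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: Scatter/gather table
 *
 * Imports shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory described by
 * the given DMA-buf attachment and scatter/gather table.
 */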
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}

/*
 * Convert all shared fences on the reservation object into a single exclusive
 * fence, so that importers which only wait for the exclusive fence observe
 * all pending work on the buffer.
 */
static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
	struct dma_fence **fences;
	unsigned int count;
	int r;

	/* No shared fences to convert, nothing to do. */
	if (!reservation_object_get_list(obj))
		return 0;

	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		/* No fences were returned, nothing to do. */
	} else if (count == 1) {
		reservation_object_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
	} else {
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 0,
					       false);
		if (!array)
			goto err_fences_put;

		reservation_object_add_excl_fence(obj, &array->base);
		dma_fence_put(&array->base);
	}

	return 0;

err_fences_put:
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);
	return -ENOMEM;
}
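
/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: Shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */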
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * We only create shared fences for internal use, but importers
		 * are expected to wait on the exclusive fence to track write
		 * hazards. As any of the current fences may correspond to a
		 * write, convert all existing fences on the reservation object
		 * into a single exclusive fence.
		 */
		r = __reservation_object_make_exclusive(bo->tbo.resv);
		if (r)
			goto error_unreserve;
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}
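
/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: Shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */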
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}
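
/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The BO's reservation object.
 */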
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}
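
/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */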
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
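
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper; this wrapper
 * rejects BOs that must not be shared and installs &amdgpu_dmabuf_ops on the
 * exported buffer.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */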
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	/* userptr BOs and per-VM (always valid) BOs must not be exported */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}
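
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * The main work is done by the &drm_gem_prime_import helper, which in turn
 * uses &amdgpu_gem_prime_import_sg_table.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */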
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM object
			 * takes an additional reference on the GEM object
			 * itself instead of the dma-buf's f_count.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}