#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One reservation slot for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;

	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}
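/*
 * amdgpu_cs_parser_init - copy and validate the userspace chunk array
 *
 * Looks up the submission context, copies the per-chunk metadata and payloads
 * from userspace and pre-parses the chunks that must be known before the job
 * is allocated (IB count, user fence BO, inline BO list).
 */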
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}

static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
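/*
 * amdgpu_cs_get_threshold_for_moves - get how many bytes TTM can move per IB
 *
 * Buffer migration to VRAM is throttled: the longer it has been since the
 * last submission and the more VRAM is free, the more bytes this submission
 * is allowed to move. The budget is tracked in microseconds (accum_us) and
 * converted to bytes with the measured memory bandwidth (log2_max_MBps).
 * A second, separate budget limits moves into CPU-visible VRAM.
 */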
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling which allows short bursts of big moves without letting
	 * a single submission monopolize the memory bandwidth.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly. This can occur when a lot of
	 * VRAM is freed by userspace or a big buffer causes many evictions.
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by
	 * setting accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0;

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}
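/*
 * amdgpu_cs_report_moved_bytes - charge moved bytes against the budget
 *
 * Report how many bytes have really been moved for the last command
 * submission so the accumulated time budget can be reduced accordingly.
 */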
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
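/*
 * amdgpu_cs_bo_validate - validate a single BO for command submission
 *
 * Places the BO in its preferred domain while the move budget lasts and
 * falls back to the allowed domains (retrying on -ENOMEM) otherwise.
 */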
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance for
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}
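/*
 * amdgpu_cs_parser_bos - gather, reserve and validate all BOs of the CS
 *
 * Builds the validated list from the BO list (handle or inline chunk), adds
 * the page directory and user fence BOs, re-pins user pages for userptr BOs,
 * reserves everything and validates it within the current move budget.
 */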
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list may already be set if AMDGPU_CHUNK_ID_BO_HANDLES was present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One reservation slot for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get the userptr backing pages. If the pages were updated after the
	 * userptr was registered, amdgpu_cs_list_validate() will rebind the
	 * TTM object so the new pages are used.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			return -ENOMEM;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);

		if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
			e->chain = dma_fence_chain_alloc();
			if (!e->chain) {
				r = -ENOMEM;
				goto error_validate;
			}
		}
	}

	/* Wait for the previous fence only after taking the reservation lock
	 * of the PD root; a ctx mutex lock is then not needed here.
	 */
	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		goto error_validate;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r) {
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			dma_fence_chain_free(e->chain);
			e->chain = NULL;
		}
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}
out:
	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}
	return 0;
}
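/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set the reservation is backed off; in any case all resources
 * held by the parsing context are released.
 */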
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff) {
		struct amdgpu_bo_list_entry *e;

		amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
			dma_fence_chain_free(e->chain);
			e->chain = NULL;
		}

		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}
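/*
 * amdgpu_cs_vm_handling - patch IBs and update the VM state for the CS
 *
 * For rings that parse or patch command streams in software, maps the IB
 * BOs and lets the ring callbacks process them. Afterwards all page table
 * updates needed by the submission are performed and their fences are added
 * to the job's sync object.
 */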
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only rings that parse or patch the CS in software need the IBs mapped */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* At most one preemptible CE and one preemptible DE IB per submission */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		/* Return if there is no run queue associated with this entity.
		 * Possibly because of disabled HW IP.
		 */
		if (entity->rq == NULL)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0,
				  AMDGPU_IB_POOL_DELAYED, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return 0;
}

static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->job->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->job->sync, fence);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r = 0;

	/* The ctx lock is dropped while dependencies are processed and re-taken below. */
	mutex_unlock(&p->ctx->lock);

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				goto out;
			break;
		}
	}

out:
	mutex_lock(&p->ctx->lock);
	return r;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}
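/*
 * amdgpu_cs_submit - push the prepared job to the scheduler
 *
 * Arms the scheduler job, re-checks that no userptr pages were invalidated
 * while the submission was being prepared (returning -EAGAIN to retry if so),
 * installs the resulting fence on all reserved BOs and signals the post
 * dependencies.
 */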
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
	if (r)
		goto error_unlock;

	drm_sched_job_arm(&job->base);

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and the fence
	 * is added to the BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr BOs were invalidated after amdgpu_cs_parser_bos(), abort
	 * with -EAGAIN so the submission can be retried.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	drm_sched_entity_push_job(&job->base);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_resv *resv = e->tv.bo->base.resv;
		struct dma_fence_chain *chain = e->chain;

		if (!chain)
			continue;

		/* Work around dma_resv shortcomings by wrapping up the
		 * submission in a dma_fence_chain and add it as exclusive
		 * fence.
		 */
		dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
				     dma_fence_get(p->fence), 1);

		rcu_assign_pointer(resv->fence_excl, &chain->base);
		e->chain = NULL;
	}

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	mutex_unlock(&p->adev->notifier_lock);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
{
	int i;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < parser->job->num_ibs; i++)
		trace_amdgpu_cs(parser, i);
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}
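/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */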
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
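/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */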
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}
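/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */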
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* Check the fence error before dropping the reference */
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}
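/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */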
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}
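/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */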
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}
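/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects of the command submission context for a certain
 * virtual memory address and return the BO and mapping backing it.
 * Returns 0 on success, a negative error code otherwise.
 */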
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}