/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

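/* Look up the BO named in the FENCE chunk, take a reference for
 * p->uf_entry and return the offset at which the fence value will be
 * written. The user fence BO must be exactly one page and must not be
 * a userptr BO.
 */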
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

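/* Build p->bo_list from the handle array passed in a BO_HANDLES chunk
 * instead of a separately created BO list.
 */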
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	/* kvfree() handles NULL, no need to check first */
	kvfree(info);

	return r;
}

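/* Copy in the chunk array and pre-parse each chunk: count IB chunks,
 * resolve the user fence and BO-handle chunks right away, and keep the
 * raw kdata of the rest for amdgpu_cs_ib_fill() and
 * amdgpu_cs_dependencies().
 */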
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MBps to get bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0.
 *
 * The move budget works like this: mm_stats.accum_us accumulates wall-clock
 * time in microseconds (capped at 200 ms) and is converted into a byte
 * budget through the estimated memory bandwidth (log2_max_MBps). Each
 * submission may move buffers worth up to that budget; moved bytes are paid
 * back via amdgpu_cs_report_moved_bytes(), which can drive the budget into
 * debt and throttle later moves.
 *
 * When plenty of VRAM is free, the budget is raised so that VRAM can be
 * filled quickly; CPU-visible VRAM gets the same treatment separately.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0;

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

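/* Validate a BO into a placement chosen by the per-CS move budget: prefer
 * the BO's preferred domains while budget remains, fall back to the
 * allowed domains otherwise (and retry there on -ENOMEM).
 */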
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

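/* Validation callback for amdgpu_vm_validate_pt_bos(); also validates the
 * shadow BO, if one exists.
 */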
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	r = amdgpu_cs_bo_validate(p, bo);
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

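/* Validate every BO on the list. Userptr BOs whose pages were invalidated
 * are first moved to the CPU domain and rebound to the freshly pinned
 * pages before normal validation.
 */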
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

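/* Collect all BOs for the submission (BO list, page directories, user
 * fence), pin userptr pages, reserve everything and validate it into the
 * right placements.
 */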
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no bo list */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			return -ENOMEM;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
	return r;
}

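/* Add fences from all validated BOs to the job's sync object, honouring
 * the per-BO explicit-sync setting.
 */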
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

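/* Run VM state updates for the submission: parse or patch IBs for rings
 * that require it (UVD/VCE VM emulation), update page tables and per-BO
 * VAs, and add the resulting fences to the job's sync object.
 */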
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

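/* Walk the IB chunks, resolve the scheduler entity for the submission
 * (all IBs must target the same entity) and allocate the job's IBs.
 */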
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows at most 1 preemptible IB for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		/* Return if there is no run queue associated with this entity.
		 * Possibly because of disabled HW IP.
		 */
		if (entity->rq == NULL)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

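/* Resolve a DEPENDENCIES/SCHEDULED_DEPENDENCIES chunk: look up each fence
 * by context, entity and sequence number and add it to the job's sync
 * object.
 */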
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			/* For scheduled dependencies it is enough to wait
			 * until the dependent job is scheduled, not until
			 * it finishes.
			 */
			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->job->sync, fence, true);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

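/* Look up the fence behind a syncobj handle (optionally at a timeline
 * point) and add it as a dependency of the job.
 */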
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

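/* The four helpers below translate the syncobj wait/signal chunks: the
 * "in" variants add wait dependencies before the job runs, the "out"
 * variants record syncobjs to be signalled with the job's fence in
 * amdgpu_cs_post_dependencies().
 */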
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			kfree(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

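/* Signal all syncobjs collected from the SYNCOBJ_OUT/TIMELINE_SIGNAL
 * chunks with the submission's fence.
 */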
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

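/* Hand the prepared job over to the GPU scheduler. Userptr validity is
 * re-checked under the notifier lock; once the job has been pushed the
 * ioctl can no longer fail.
 */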
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	mutex_unlock(&p->adev->notifier_lock);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

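/* Entry point of the CS ioctl: parse the chunks, resolve dependencies,
 * reserve and validate all buffers and push the job to the scheduler.
 */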
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

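/* Convert a fence from a CS into a syncobj handle, a syncobj fd or a
 * sync_file fd, depending on info->in.what.
 */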
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* Check the fence error before dropping our reference */
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}