#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"

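/*
 * amdgpu_cs_user_fence_chunk - grab the user fence BO referenced by a
 * FENCE chunk
 *
 * Looks up the GEM handle, takes a reference on the BO and records it as
 * p->uf_entry. The BO must be exactly one page, the fence offset must leave
 * room for an 8 byte fence value, and userptr BOs are rejected.
 */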
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One fence slot for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

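/*
 * amdgpu_cs_bo_handles_chunk - create the BO list for a BO_HANDLES chunk
 *
 * Converts the user supplied drm_amdgpu_bo_list_in into an amdgpu_bo_list
 * attached to the parser.
 */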
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

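/*
 * amdgpu_cs_parser_init - copy in and validate the CS chunks
 *
 * Copies the chunk array from user space, validates each chunk and
 * dispatches the FENCE and BO_HANDLES chunks. Also acquires the context,
 * rejects submissions from guilty contexts and allocates the job.
 */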
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

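/*
 * MM throttling bookkeeping: accum_us is a time budget in microseconds.
 * Because 1 MB/s is the same as 1 byte/us, shifting by log2_max_MBps
 * converts between the time budget and the equivalent byte budget.
 */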
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

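/*
 * amdgpu_cs_get_threshold_for_moves - compute how many bytes this CS may
 * move for buffer migrations
 *
 * Returns a per-submission budget for total and CPU-visible VRAM moves.
 * The budget accumulates over time (up to 200 ms worth) and is raised when
 * plenty of VRAM is free, so throttling only kicks in under memory
 * pressure.
 */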
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling which allows substantial buffer moves in bursts while
	 * still bounding the worst case.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

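/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */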
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

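/*
 * amdgpu_cs_bo_validate - place a BO for command submission
 *
 * Picks the preferred placement while the move budget lasts and falls back
 * to the allowed domains once the budget is exhausted or validation runs
 * out of memory.
 */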
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO out of
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

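/* Last resort, try to evict something from the current working set */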
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (; &p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (bo == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

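/*
 * amdgpu_cs_parser_bos - reserve and validate all buffers for the CS
 *
 * Builds p->validated from the BO list (handle or BO_HANDLES chunk), grabs
 * the userptr backing pages, reserves everything, then validates the page
 * table BOs, duplicates and the working set within the move budget.
 */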
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list may already be set if a BO_HANDLES chunk was present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One fence slot for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If the pages were updated after they
	 * were registered in the MMU notifier, the changed pages are noted
	 * here so the mappings can be validated again below.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			return -ENOMEM;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates, false);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
	return r;
}

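/*
 * amdgpu_cs_sync_rings - add the reservation object fences of all validated
 * BOs as dependencies of the job
 */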
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct reservation_object *resv = bo->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}

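/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/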
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

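/*
 * amdgpu_cs_vm_handling - handle VM related work for the CS
 *
 * Patches or parses the IBs for rings that emulate VM handling (UVD/VCE),
 * updates the page tables for all BOs in the submission and adds the
 * resulting fences as dependencies of the job.
 */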
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;

		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

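/*
 * amdgpu_cs_ib_fill - set up the amdgpu_ib structures from the IB chunks
 *
 * Resolves the scheduler entity for each IB, enforces that all IBs of a
 * submission target the same entity and rejects user fences on rings that
 * don't support them.
 */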
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

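/*
 * amdgpu_cs_process_fence_dep - add fence dependencies from a DEPENDENCIES
 * chunk
 *
 * For SCHEDULED_DEPENDENCIES the job only has to wait until the dependency
 * is scheduled, not finished, so the scheduled fence is used instead.
 */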
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			kfree(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			/* The chain node is now owned by the syncobj */
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

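/*
 * amdgpu_cs_submit - push the job to the scheduler
 *
 * Takes the MMU notifier lock, double checks that no userptr pages were
 * invalidated since amdgpu_cs_parser_bos(), installs the post dependencies
 * and the buffer fences and finally pushes the job to the entity.
 */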
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock.
	 * p->mn is held until amdgpu_cs_submit is finished and the fence is
	 * added to the BOs.
	 */
	amdgpu_mn_lock(p->mn);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

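/*
 * amdgpu_cs_ioctl - main entry point for the CS ioctl
 *
 * Parses the chunks, fills the IBs, resolves dependencies, reserves and
 * validates the buffers, handles the VM updates and finally submits the
 * job. A rough userspace-side sketch of a minimal submission with a single
 * IB chunk (illustrative only, error handling omitted):
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_info = { ... };
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id = AMDGPU_CHUNK_ID_IB,
 *		.length_dw = sizeof(ib_info) / 4,
 *		.chunk_data = (uintptr_t)&ib_info,
 *	};
 *	uint64_t chunk_ptr = (uintptr_t)&chunk;
 *	union drm_amdgpu_cs cs = { 0 };
 *
 *	cs.in.ctx_id = ctx_id;
 *	cs.in.num_chunks = 1;
 *	cs.in.chunks = (uintptr_t)&chunk_ptr;
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
 */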
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}

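/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */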
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

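/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */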
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

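/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */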
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* check fence->error before the last reference is dropped */
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

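/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */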
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

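/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */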
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

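/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */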
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}