#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}

int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		kfree(kernel_state);
		return;
	}

	/* Get the BOs from both the binner and renderer jobs into the
	 * hang state.
	 */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		kfree(kernel_state);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain the BOs just in case they were marked
			 * purgeable.  This prevents them from being purged
			 * before someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the unref_list:
			 * they are private to vc4 and never purgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* The registers have been dumped, so the usecnt references taken
	 * above are no longer needed.  Mark the BOs as WILLNEED so the
	 * purgeable logic leaves them alone while the hang state holds its
	 * GEM references, then drop the usecnt references.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping and
		 * re-taking the runtime PM reference.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

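/* Waits for the given seqno to be reached (i.e. for the job that
 * emitted it to complete), up to timeout_ns nanoseconds.  Returns 0
 * on success, -ETIME on timeout, or -ERESTARTSYS if an interruptible
 * wait was broken by a signal.
 */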
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 cache, then clear the per-slice instruction,
	 * uniforms and texture caches so the next job doesn't fetch
	 * stale data.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

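/* Starts the next queued bin job, if any: flushes the GPU caches,
 * switches the active perf monitor when the job asks for a different
 * one, and submits the binner control list.  Jobs with an empty
 * binner CL are moved straight to the render queue, and we retry with
 * the following bin job when it uses the same perfmon.
 */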
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a
	 * previous job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if it has a different
		 * perfmon instance attached to it (including the case where
		 * only one of the two jobs has a perfmon), so only loop
		 * around when the perfmons match.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

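/* Takes the reservation lock on all the BOs referenced by the job, so
 * that their fences can be updated at submit time.
 *
 * The BOs on exec->unref_list (tile alloc/state and other internal
 * allocations) are private to vc4 and never get dma-buf fences
 * attached, so their reservations are not taken here.
 */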
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

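/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, the new one is kicked off right away;
 * otherwise it sits on the bin_job_list until the flush-done
 * interrupt of the previous job moves the queues along, since the
 * hardware only runs one bin and one render control list at a time.
 */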
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If our job is at the head of the bin queue and the queued render
	 * job (if any) has the same perfmon attached (or neither has one),
	 * kick it off now.  Otherwise it will be started from the previous
	 * job's flush/render done interrupt.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

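/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */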
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* A job always references at least its render targets, so
		 * an empty BO list can't describe a valid job.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

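/* Copies the binner command list, shader records and uniforms in from
 * userspace, validates them into a freshly allocated BO (exec->exec_bo),
 * and finally waits for any previous rendering that the bin job
 * depends on.
 */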
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate temporary kernel storage for the copied-in command list,
	 * shader records and uniforms.  The validator needs to read the
	 * contents back, which is cheaper from cached kernel memory than
	 * through the destination BO's mapping.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

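/* Frees the job and everything it holds once the hardware (or a GPU
 * reset) is done with it: signals its fence if still pending, drops
 * the BO references and usecnts, returns its bin slots and perfmon,
 * and drops the runtime-PM reference taken at submit time.
 */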
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

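/* Walks vc4->job_done_list, freeing each completed job, and schedules
 * any queued seqno callbacks whose seqno has now been reached.
 */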
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

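/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */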
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

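/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using validation to
 * prevent the application from getting access to memory it shouldn't.
 */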
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			vc4->power_refcount--;
			mutex_unlock(&vc4->power_lock);
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* If the in-fence comes from another fence context we have
		 * to wait for it here; fences from our own context are
		 * already ordered by the serialized job queue.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* The fence is installed in out_sync inside
		 * vc4_queue_submit(), since the job could complete (and its
		 * fence be freed) before this ioctl returns.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data, so drop our reference
	 * even if submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the remaining
	 * objects.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

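/* Lets userspace mark a BO as DONTNEED (purgeable under memory
 * pressure) or WILLNEED again, and reports in args->retained whether
 * the BO's backing storage is still present.
 */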
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not supported on imported BOs: we don't own their backing
	 * storage, so we can't purge it.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}