#include <drm/drmP.h>
#include "vmwgfx_drv.h"
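
/*
 * Fence seqnos are 32-bit and wrap. A fence counts as signaled when the
 * seqno the device last passed is no more than half the seqno space
 * (VMW_FENCE_WRAP) ahead of the fence's own seqno.
 */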
#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        u32 event_fence_action_size;
        bool fifo_down;
        struct list_head cleanup_list;
        uint32_t pending_actions[VMW_ACTION_MAX];
        struct mutex goal_irq_mutex;
        bool goal_irq_on; /* Protected by @goal_irq_mutex */
        bool seqno_valid; /* Protected by @lock, and may not be set to true
                             without the @goal_irq_mutex held. */

        u64 ctx;
};

struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};
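
/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */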
struct vmw_event_fence_action {
        struct vmw_fence_action action;

        struct drm_pending_event *event;
        struct vmw_fence_obj *fence;
        struct drm_device *dev;

        uint32_t *tv_sec;
        uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
        return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
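
/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * When there are no waiters, fence actions attached to unsignaled fences
 * are instead driven by the fence goal irq: the SVGA_FIFO_FENCE_GOAL
 * register is kept pointing at the seqno of the oldest unsignaled fence
 * that has actions attached, and fman::seqno_valid indicates whether that
 * register currently holds a valid goal.
 */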
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);

        spin_lock(&fman->lock);
        list_del_init(&fence->head);
        --fman->num_fence_objects;
        spin_unlock(&fman->lock);
        fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
        return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
        return "svga";
}
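
/*
 * vmw_fence_enable_signaling - dma_fence_ops callback.
 *
 * Returns false if the fence seqno has already passed, so that no irq is
 * needed. Otherwise pings the host to make sure a fence irq will
 * eventually be raised.
 */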
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;

        u32 *fifo_mem = dev_priv->mmio_virt;
        u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        return true;
}

struct vmwgfx_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct vmwgfx_wait_cb *wait =
                container_of(cb, struct vmwgfx_wait_cb, base);

        wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;
        struct vmwgfx_wait_cb cb;
        long ret = timeout;

        if (likely(vmw_fence_obj_signaled(fence)))
                return timeout;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        spin_lock(f->lock);

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        cb.base.func = vmwgfx_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &f->cb_list);

        for (;;) {
                __vmw_fences_update(fman);
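
                /*
                 * We can use the barrier free __set_current_state() since
                 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
                 * fence spinlock.
                 */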
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
                        if (ret == 0 && timeout > 0)
                                ret = 1;
                        break;
                }

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (ret == 0)
                        break;

                spin_unlock(f->lock);

                ret = schedule_timeout(ret);

                spin_lock(f->lock);
        }
        __set_current_state(TASK_RUNNING);
        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);

out:
        spin_unlock(f->lock);

        vmw_seqno_waiter_remove(dev_priv);

        return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
        .get_driver_name = vmw_fence_get_driver_name,
        .get_timeline_name = vmw_fence_get_timeline_name,
        .enable_signaling = vmw_fence_enable_signaling,
        .wait = vmw_fence_wait,
        .release = vmw_fence_obj_destroy,
};
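
/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */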
static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;
        bool seqno_valid;

        do {
                INIT_LIST_HEAD(&list);
                mutex_lock(&fman->goal_irq_mutex);

                spin_lock(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                seqno_valid = fman->seqno_valid;
                spin_unlock(&fman->lock);

                if (!seqno_valid && fman->goal_irq_on) {
                        fman->goal_irq_on = false;
                        vmw_goal_waiter_remove(fman->dev_priv);
                }
                mutex_unlock(&fman->goal_irq_mutex);

                if (list_empty(&list))
                        return;
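
                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 * Hence fman::lock is not held.
                 */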
                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        if (action->cleanup)
                                action->cleanup(action);
                }
        } while (1);
}
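
/*
 * vmw_fence_manager_init - Allocate and initialize a fence manager for
 * @dev_priv. Returns NULL on allocation failure.
 */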
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(!fman))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
                TTM_OBJ_EXTRA_SIZE;
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
        fman->event_fence_action_size =
                ttm_round_pot(sizeof(struct vmw_event_fence_action));
        mutex_init(&fman->goal_irq_mutex);
        fman->ctx = dma_fence_context_alloc(1);

        return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock(&fman->lock);
        lists_empty = list_empty(&fman->fence_list) &&
                      list_empty(&fman->cleanup_list);
        spin_unlock(&fman->lock);

        BUG_ON(!lists_empty);
        kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence, u32 seqno,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        int ret = 0;

        dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
                       fman->ctx, seqno);
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->destroy = destroy;

        spin_lock(&fman->lock);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        ++fman->num_fence_objects;

out_unlock:
        spin_unlock(&fman->lock);
        return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                       struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                fman->pending_actions[action->type]--;
                if (action->seq_passed != NULL)
                        action->seq_passed(action);
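
                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */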
                list_add_tail(&action->head, &fman->cleanup_list);
        }
}
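
/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */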
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                      u32 passed_seqno)
{
        u32 goal_seqno;
        u32 *fifo_mem;
        struct vmw_fence_obj *fence;

        if (likely(!fman->seqno_valid))
                return false;

        fifo_mem = fman->dev_priv->mmio_virt;
        goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
                return false;

        fman->seqno_valid = false;
        list_for_each_entry(fence, &fman->fence_list, head) {
                if (!list_empty(&fence->seq_passed_actions)) {
                        fman->seqno_valid = true;
                        vmw_mmio_write(fence->base.seqno,
                                       fifo_mem + SVGA_FIFO_FENCE_GOAL);
                        break;
                }
        }

        return true;
}
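
/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */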
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        u32 goal_seqno;
        u32 *fifo_mem;

        if (dma_fence_is_signaled_locked(&fence->base))
                return false;

        fifo_mem = fman->dev_priv->mmio_virt;
        goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(fman->seqno_valid &&
                   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
                return false;

        vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
        fman->seqno_valid = true;

        return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;
        bool needs_rerun;
        uint32_t seqno, new_seqno;
        u32 *fifo_mem = fman->dev_priv->mmio_virt;

        seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        dma_fence_signal_locked(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                } else
                        break;
        }
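
        /*
         * Rerun if the fence goal seqno was updated, and the
         * hardware might have raced with that update, so that
         * we missed a fence_goal irq.
         */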
        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
        if (unlikely(needs_rerun)) {
                new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
                if (new_seqno != seqno) {
                        seqno = new_seqno;
                        goto rerun;
                }
        }

        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        __vmw_fences_update(fman);
        spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                return true;

        vmw_fences_update(fman);

        return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

        if (likely(ret > 0))
                return 0;
        else if (ret == 0)
                return -EBUSY;
        else
                return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     struct vmw_fence_obj **p_fence)
{
        struct vmw_fence_obj *fence;
        int ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(!fence))
                return -ENOMEM;

        ret = vmw_fence_obj_init(fman, fence, seqno,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
        return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        ttm_base_object_kfree(ufence, base);
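
        /*
         * Free kernel space accounting.
         */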
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int ret;
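
        /*
         * Account for the kernel-space memory the user fence object
         * will consume before allocating it.
         */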
        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   &ctx);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(!ufence)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }
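
        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */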
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);

        if (unlikely(ret != 0)) {
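                /*
                 * Free the base object's reference.
                 */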
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.handle;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}
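
/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */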
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
                       struct dma_fence *fence)
{
        struct dma_fence_array *fence_array;
        int ret = 0;
        int i;

        if (dma_fence_is_signaled(fence))
                return 0;

        if (!dma_fence_is_array(fence))
                return dma_fence_wait(fence, true);
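
        /* From i915: Note that if the fence-array was created in
         * signal-on-any mode, we should *not* decompose it into its
         * individual fences. However, we don't currently store which
         * mode the fence-array is operating in. Fortunately, the only
         * user of signal-on-any is private to amdgpu and we should not
         * see any incoming fence-array from sync-file being in
         * signal-on-any mode.
         */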
        fence_array = to_dma_fence_array(fence);
        for (i = 0; i < fence_array->num_fences; i++) {
                struct dma_fence *child = fence_array->fences[i];

                ret = dma_fence_wait(child, true);

                if (ret < 0)
                        return ret;
        }

        return 0;
}
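
/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */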
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        struct list_head action_list;
        int ret;
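
        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */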
        spin_lock(&fman->lock);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                dma_fence_get(&fence->base);
                spin_unlock(&fman->lock);

                ret = vmw_fence_obj_wait(fence, false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        dma_fence_signal(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                }

                BUG_ON(!list_empty(&fence->head));
                dma_fence_put(&fence->base);
                spin_lock(&fman->lock);
        }
        spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        fman->fifo_down = false;
        spin_unlock(&fman->lock);
}
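
/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */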
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (!base) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        if (base->refcount_release != vmw_user_fence_base_release) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                ttm_base_object_unref(&base);
                return ERR_PTR(-EINVAL);
        }

        return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
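
        /*
         * 64-bit division not present on 32-bit systems, so do an
         * approximation. (Divide by 1000000).
         */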
        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                       (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);
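
        /*
         * Optionally, the caller can drop its reference to the fence
         * object on wait completion, saving a follow-up unref ioctl.
         */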
        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);

        arg->signaled = vmw_fence_obj_signaled(fence);

        arg->signaled_flags = arg->flags;
        spin_lock(&fman->lock);
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}
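
/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */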
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);
        struct drm_device *dev = eaction->dev;
        struct drm_pending_event *event = eaction->event;

        if (unlikely(event == NULL))
                return;

        spin_lock_irq(&dev->event_lock);

        if (likely(eaction->tv_sec != NULL)) {
                struct timespec64 ts;

                ktime_get_ts64(&ts);

                *eaction->tv_sec = ts.tv_sec;
                *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
        }

        drm_send_event_locked(dev, eaction->event);
        eaction->event = NULL;
        spin_unlock_irq(&dev->event_lock);
}
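
/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */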
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);

        vmw_fence_obj_unreference(&eaction->fence);
        kfree(eaction);
}
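
/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */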
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                                     struct vmw_fence_action *action)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        bool run_update = false;

        mutex_lock(&fman->goal_irq_mutex);
        spin_lock(&fman->lock);

        fman->pending_actions[action->type]++;
        if (dma_fence_is_signaled_locked(&fence->base)) {
                struct list_head action_list;

                INIT_LIST_HEAD(&action_list);
                list_add_tail(&action->head, &action_list);
                vmw_fences_perform_actions(fman, &action_list);
        } else {
                list_add_tail(&action->head, &fence->seq_passed_actions);
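
                /*
                 * This function may set fman::seqno_valid, so it must
                 * be run with the goal_irq_mutex held.
                 */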
                run_update = vmw_fence_goal_check_locked(fence);
        }

        spin_unlock(&fman->lock);

        if (run_update) {
                if (!fman->goal_irq_on) {
                        fman->goal_irq_on = true;
                        vmw_goal_waiter_add(fman->dev_priv);
                }
                vmw_fences_update(fman);
        }
        mutex_unlock(&fman->goal_irq_mutex);
}
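
/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side-effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */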
int vmw_event_fence_action_queue(struct drm_file *file_priv,
                                 struct vmw_fence_obj *fence,
                                 struct drm_pending_event *event,
                                 uint32_t *tv_sec,
                                 uint32_t *tv_usec,
                                 bool interruptible)
{
        struct vmw_event_fence_action *eaction;
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
        if (unlikely(!eaction))
                return -ENOMEM;

        eaction->event = event;

        eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
        eaction->action.cleanup = vmw_event_fence_action_cleanup;
        eaction->action.type = VMW_ACTION_EVENT;

        eaction->fence = vmw_fence_obj_reference(fence);
        eaction->dev = fman->dev_priv->dev;
        eaction->tv_sec = tv_sec;
        eaction->tv_usec = tv_usec;

        vmw_fence_obj_add_action(fence, &eaction->action);

        return 0;
}

struct vmw_event_fence_pending {
        struct drm_pending_event base;
        struct drm_vmw_event_fence event;
};
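
/*
 * vmw_event_fence_action_create - Allocate a drm event and queue it on
 * @fence for delivery to @file_priv when the fence seqno passes. If
 * DRM_VMW_FE_FLAG_REQ_TIME is set in @flags, the event carries a
 * timestamp taken when the fence signals.
 */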
static int vmw_event_fence_action_create(struct drm_file *file_priv,
                                         struct vmw_fence_obj *fence,
                                         uint32_t flags,
                                         uint64_t user_data,
                                         bool interruptible)
{
        struct vmw_event_fence_pending *event;
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct drm_device *dev = fman->dev_priv->dev;
        int ret;

        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (unlikely(!event)) {
                DRM_ERROR("Failed to allocate an event.\n");
                ret = -ENOMEM;
                goto out_no_space;
        }

        event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
        event->event.base.length = sizeof(*event);
        event->event.user_data = user_data;

        ret = drm_event_reserve_init(dev, file_priv, &event->base,
                                     &event->event.base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate event space for this file.\n");
                kfree(event);
                goto out_no_space;
        }

        if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   &event->event.tv_sec,
                                                   &event->event.tv_usec,
                                                   interruptible);
        else
                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   NULL,
                                                   NULL,
                                                   interruptible);
        if (ret != 0)
                goto out_no_queue;

        return 0;

out_no_queue:
        drm_event_cancel_free(dev, &event->base);
out_no_space:
        return ret;
}
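
/*
 * vmw_fence_event_ioctl - DRM_VMW_FENCE_EVENT ioctl. Attaches a
 * DRM_VMW_EVENT_FENCE_SIGNALED event to an existing or newly created
 * fence object and optionally returns a user-space fence handle.
 */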
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_fence_event_arg *arg =
                (struct drm_vmw_fence_event_arg *) data;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct ttm_object_file *tfile = vmw_fp->tfile;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)(unsigned long)
                arg->fence_rep;
        uint32_t handle;
        int ret;
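
        /*
         * Look up an existing fence object,
         * and if user-space wants a new reference,
         * add one.
         */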
        if (arg->handle) {
                struct ttm_base_object *base =
                        vmw_fence_obj_lookup(tfile, arg->handle);

                if (IS_ERR(base))
                        return PTR_ERR(base);

                fence = &(container_of(base, struct vmw_user_fence,
                                       base)->fence);
                (void) vmw_fence_obj_reference(fence);

                if (user_fence_rep != NULL) {
                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
                                                 TTM_REF_USAGE, NULL, false);
                        if (unlikely(ret != 0)) {
                                DRM_ERROR("Failed to reference a fence "
                                          "object.\n");
                                goto out_no_ref_obj;
                        }
                        handle = base->handle;
                }
                ttm_base_object_unref(&base);
        }
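
        /*
         * Create a new fence object.
         */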
        if (!fence) {
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
                                                 &fence,
                                                 (user_fence_rep) ?
                                                 &handle : NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Fence event failed to create fence.\n");
                        return ret;
                }
        }

        BUG_ON(fence == NULL);

        ret = vmw_event_fence_action_create(file_priv, fence,
                                            arg->flags,
                                            arg->user_data,
                                            true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Failed to attach event to fence.\n");
                goto out_no_create;
        }

        vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
                                    handle, -1, NULL);
        vmw_fence_obj_unreference(&fence);
        return 0;
out_no_create:
        if (user_fence_rep != NULL)
                ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
        vmw_fence_obj_unreference(&fence);
        return ret;
}