#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
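/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */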
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
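/**
 * vmw_resource_alloc_id - allocate a resource id identifying the resource
 * to the device.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free id (starting from 1) from the idr of the
 * resource's type and assign it to @res->id.
 */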
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
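/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res:      The struct vmw_resource to initialize.
 * @delay_id: Defer device id allocation until first use if true.
 * @res_free: Resource destructor. If NULL, kfree() is used.
 * @func:     Resource function table for this resource type.
 */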
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
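/**
 * vmw_resource_activate - mark a resource as available for lookup and use.
 *
 * @res:        Pointer to the newly created resource.
 * @hw_destroy: Hardware destructor callback. May be NULL if the device
 *              needs no action on destruction.
 *
 * Once activated, the resource can be found and used by other threads,
 * so it must be fully initialized before this call.
 */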
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
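/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle.
 *
 * @dev_priv:  Pointer to a device private struct.
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller.
 * @handle:    The TTM user-space handle.
 * @converter: Pointer to an object describing the resource type.
 * @p_res:     On successful return, a refcounted pointer to the resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL is returned.
 */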
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
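/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object
 * backing a user-space handle.
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile:    Pointer to a struct ttm_object_file identifying the caller.
 * @handle:   The TTM user-space handle.
 * @out_surf: Assigned if the handle names a surface. Must point to NULL.
 * @out_buf:  Assigned if the handle names a buffer object. Must point to NULL.
 *
 * The handle is first tried as a surface handle and, failing that, as a
 * buffer object handle. Exactly one of *@out_surf and *@out_buf is set on
 * success.
 */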
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
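/**
 * vmw_resource_buf_alloc - allocate a backup buffer for a resource.
 *
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 *                 performed while interruptible.
 *
 * No-op if the resource already has a backup buffer. The new buffer uses
 * the backup placement from the resource function table.
 */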
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}
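/**
 * vmw_resource_do_validate - make a resource up-to-date and visible to the
 * device.
 *
 * @res:     The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly backing the resource.
 *
 * Creates the device resource if necessary and binds the backup buffer if
 * one is required. On hardware resource shortage, -EBUSY is returned and
 * the caller is expected to evict resources from the LRU list and retry.
 */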
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
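/**
 * vmw_resource_unreserve - unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               The resource to unreserve.
 * @switch_backup:     Whether command submission switched the backup buffer.
 * @new_backup:        Pointer to the new backup buffer if @switch_backup is
 *                     true. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource LRU list, so that it can be evicted if necessary.
 */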
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
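/**
 * vmw_resource_check_buffer - make sure a backup buffer is present and
 * reserved.
 *
 * @ticket:        The ww acquire context to use, or NULL.
 * @res:           The resource whose backup buffer is checked.
 * @interruptible: Whether any sleeps should be performed while
 *                 interruptible.
 * @val_buf:       On successful return contains data about the reserved
 *                 backup buffer.
 *
 * Allocates a backup buffer if the resource lacks one, reserves it, and
 * validates it to the resource's backup placement when required.
 */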
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}
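/**
 * vmw_resource_reserve - reserve a resource for command submission.
 *
 * @res:           The resource to reserve.
 * @interruptible: Whether any sleeps should be performed while
 *                 interruptible.
 * @no_backup:     Do not allocate a backup buffer even if the resource
 *                 needs one.
 *
 * Takes the resource off the LRU list and makes sure a backup buffer is
 * present for guest-backed resources. The buffer may not yet be bound to
 * the resource at this point.
 */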
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
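/**
 * vmw_resource_backoff_reservation - unreserve and unreference a backup
 * buffer.
 *
 * @ticket:  The ww acquire context used for the reservation.
 * @val_buf: Backup buffer information.
 */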
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_unref(&val_buf->bo);
}
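/**
 * vmw_resource_do_evict - evict a resource from the device.
 *
 * @ticket:        The ww acquire context to use, or NULL.
 * @res:           The resource to evict.
 * @interruptible: Whether to wait interruptible.
 *
 * Unbinds the resource from its backup buffer, if any, and destroys the
 * device resource so it can be recreated by a later validation.
 */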
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}
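/**
 * vmw_resource_validate - make a resource up-to-date and visible to the
 * device.
 *
 * @res: The resource to make visible to the device.
 *
 * Creates and binds the device resource if necessary. If the device is out
 * of resources of this type, other resources of the same type are evicted
 * from the LRU list and the operation is retried a limited number of times.
 *
 * Return: Zero on success, or a negative error code on failure.
 */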
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(NULL, evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
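/**
 * vmw_resource_unbind_list - unbind all resources bound to a buffer object.
 *
 * @vbo: Pointer to the buffer object. Must be reserved.
 *
 * Unbinds every resource currently backed by @vbo, marks their backup
 * buffers dirty, and waits for outstanding GPU usage of the buffer.
 */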
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.shared = false
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, true, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}
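/**
 * vmw_query_readback_all - read back all hardware query results bound to
 * a query MOB.
 *
 * @dx_query_mob: Buffer containing the DX query MOB.
 *
 * Issues a readback-all command for the query context bound to the MOB.
 * Assumes binding_mutex is held by the caller.
 */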
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing. */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Clear the MOB's query context pointer; nothing is left to read back. */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}
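/**
 * vmw_query_move_notify - read back cached query states before a move.
 *
 * @bo:  The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * If a query MOB is being moved out of MOB memory into system memory, all
 * cached query results are read back and the buffer is fenced before the
 * move is allowed to complete.
 */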
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If the BO is moving from MOB to system memory. */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it. */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}
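/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer
 *
 * @res: The resource being queried.
 */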
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
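/**
 * vmw_resource_evict_type - evict all evictable resources of a specific type.
 *
 * @dev_priv: Pointer to a device private struct.
 * @type:     The resource type to evict.
 *
 * Walks the per-type LRU list and evicts each resource, giving up after
 * VMW_RES_EVICT_ERR_COUNT consecutive failures.
 */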
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}
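/**
 * vmw_resource_evict_all - evict all evictable resources.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Evicts all evictable resources of every type, holding cmdbuf_mutex to
 * serialize against command submission.
 */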
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
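/**
 * vmw_resource_pin - add a pin reference on a resource
 *
 * @res:           The resource to add a pin reference on.
 * @interruptible: Whether to wait interruptible.
 *
 * Adds a pin reference and, if needed, validates the resource. While a pin
 * reference is held, the resource cannot be evicted and its id does not
 * change.
 *
 * Return: Zero on success, or a negative error code on failure.
 */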
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Pin the backup buffer while it is still reserved. */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
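/**
 * vmw_resource_unpin - remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from.
 *
 * When the last pin reference is dropped, the backup buffer is unpinned
 * and the resource becomes evictable again.
 */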
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
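/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */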
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}