#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10
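
/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob.
 * @res: The resource.
 *
 * Inserts the resource into the backup buffer's resource tree, ordered by
 * backup offset, and registers its eviction priority with the buffer.
 */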
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}
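
/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob.
 * @res: The resource.
 *
 * Removes the resource from the backup buffer's resource tree, if attached,
 * and drops its eviction priority reference on the buffer.
 */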
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
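
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */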
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
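
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and a negative error code on
 * failure.
 */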
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
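
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res:      The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 *            the first validation.
 * @res_free: Resource destructor.
 * @func:     Resource function table.
 */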
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
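
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle to a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */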
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
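
/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle to a struct vmw_resource, without taking a reference
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 *
 * The caller is responsible for calling ttm_base_object_noref_release()
 * when done with the resource.
 *
 * Return: The struct vmw_resource pointed to by @handle, ERR_PTR(-ESRCH) if
 * the handle can't be found, or ERR_PTR(-EINVAL) if it is associated with
 * an incorrect resource type.
 */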
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
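
/*
 * Helper function that looks up either a surface or a buffer object from
 * a user-space handle. The pointers pointed to by @out_surf and @out_buf
 * must be NULL on entry; exactly one of them is set on success.
 */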
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
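
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */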
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->backup_size);
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.base.size < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}
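
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Whether the validation is part of a GPU operation that
 *                  will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */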
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}
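
	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */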
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}
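
	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */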
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
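
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */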
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
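
			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */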
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}
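
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */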
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}
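
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed interruptible.
 * @no_backup:      Whether no backup buffer is needed for this reservation.
 *
 * This function takes the resource off the LRU list and makes sure that a
 * backup buffer is present for guest-backed resources. However, the buffer
 * may not be bound to the resource at this point.
 */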
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
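
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */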
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}
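
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */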
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}
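
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */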
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);
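
		/* Trylock backup buffers with a NULL ticket. */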
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
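
/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backup buffer
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resources if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */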
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}
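
/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */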
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
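
	/* Triggers a rebind the next time affected context is bound */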
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}
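
/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */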
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}
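
	/* If BO is being moved from MOB to system memory */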
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);
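
		/* Create a fence and attach the BO to it */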
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}
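
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource.
 */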
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
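
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the driver takedown, try to
 * evict all evictable resources of a specific type.
 */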
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);
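
		/* Wait lock backup buffers with a ticket. */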
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}
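
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the driver takedown, try to
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */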
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
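
/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to perform waits interruptible
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */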
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}
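
			/* Do we really need to pin the MOB as well? */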
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}
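
/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */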
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
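
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */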
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}
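
/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */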
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}
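
/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are dirty-tracked.
 */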
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;
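
	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */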
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
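			/* Continue to look for resources with lower offsets */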
		}
	}
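
	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */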
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}
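
	/*
	 * Set number of pages allowed prefaulting and fence the buffer object.
	 */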
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_excl_fence(bo->base.resv));
	}

	return 0;
}