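/*
 * Buffer object (bo) management for the vmwgfx driver: pinning and
 * placement helpers, cached kernel maps, user-space handles backed by
 * TTM base/prime objects, the synccpu and dumb-buffer ioctls, and the
 * TTM move/swap notification hooks.
 */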
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"

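/**
 * struct vmw_user_buffer_object - A buffer object visible to user-space
 *
 * @prime: The TTM prime object providing user-space visibility.
 * @vbo: The embedded struct vmw_buffer_object.
 */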
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};

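/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a
 * struct vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding @bo.
 */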
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}

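/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a
 * struct vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding @bo.
 */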
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}

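/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement and pin it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */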
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

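/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */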
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

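/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */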
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}

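/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */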
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * If the buffer is already in VRAM but not at the start of it,
	 * and not pinned, first evict it to system memory so that the
	 * validation below can place it at offset 0.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

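/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */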
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

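/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */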
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}

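/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 * The pin is reference counted; only the first pin and the last unpin
 * actually change the NO_EVICT flag on the buffer's current placement.
 */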
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}

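/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */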
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

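/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */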
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}

/*
 * Compute the TTM accounting size for a buffer object, including the
 * backend, the (user) struct and the page-array overhead.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

/* TTM destroy callback for kernel-internal (non user-visible) buffer objects. */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}

/* TTM destroy callback for user-visible buffer objects. */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

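/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */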
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}

/*
 * Release callback for the TTM base object embedded in a user buffer
 * object: drops the buffer object reference held by the base object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}

/*
 * Called when a per-file reference of a given type on the base object is
 * released; only TTM_REF_SYNCCPU_WRITE references are expected here.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}

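/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: If non-NULL, pointer to where an additional reference to the
 * buffer object's TTM base object should be placed.
 * Return: Zero on success, negative error code on error.
 */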
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}

/*
 * Check that @tfile holds a reference on the user buffer object backing
 * @bo, i.e. that the caller is allowed to access it.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/*
 * Grab a CPU synchronization on the buffer: wait for (or poll) buffer idle
 * and, unless command submission access is allowed, register a
 * TTM_REF_SYNCCPU_WRITE reference to block GPU writers.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/*
 * Release a CPU synchronization previously grabbed with
 * vmw_user_bo_synccpu_grab().
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

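/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */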
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

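/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function allocates a struct vmw_user_buffer_object bo and returns
 * its handle and mmap offset to user-space.
 */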
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

/*
 * Ioctl entry point for unreferencing (freeing) a user buffer object:
 * drops the per-file TTM_REF_USAGE reference identified by the handle.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

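/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the buffer object should be placed.
 * @p_base: If non-NULL, pointer to where a pointer to the TTM base object
 * should be placed, holding an additional reference.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer are
 * refcounted.
 */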
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}

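/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * taking a reference.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * Returns a pointer to the struct vmw_buffer_object without refcounting it;
 * the pointer is only valid until the corresponding
 * ttm_base_object_noref_release() call.
 *
 * Return: A struct vmw_buffer_object pointer on success or a negative
 * error pointer on failure.
 */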
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}

/*
 * Add a TTM_REF_USAGE reference on the user buffer object for @tfile and
 * return its handle. Only works on buffer objects created through
 * vmw_user_bo_alloc().
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

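/**
 * vmw_bo_fence_single - Fence a single TTM buffer object.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will insert a fence
 * into the command stream and use that one instead.
 *
 * The buffer object must be reserved; it is not unreserved here.
 */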
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}

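/**
 * vmw_dumb_create - Create a dumb buffer object.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_create functionality.
 * It computes the pitch and size from the requested dimensions and backs
 * the dumb buffer with a vmw user buffer object.
 */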
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/*
 * drm dumb_map_offset callback: look up the dumb buffer by handle and
 * return the mmap offset of its underlying buffer object.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}

/*
 * drm dumb_destroy callback: drop the per-file reference identified by the
 * dumb buffer's handle.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/*
 * TTM swap_notify callback, called before a buffer object is swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Only buffer objects owned by this driver are of interest here. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout. */
	vmw_bo_unmap(vmw_buffer_object(bo));
}

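/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the buffer
 * doesn't move.
 */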
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Only buffer objects owned by this driver are of interest here. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before a move to or from VRAM;
	 * for other moves the underlying pages stay the same and the
	 * map remains valid.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If the buffer is leaving MOB placement, unbind any resources
	 * bound to it first.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}