1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <drm/ttm/ttm_placement.h>
30
31#include <drm/drmP.h>
32#include "vmwgfx_drv.h"
33#include "ttm_object.h"
34
35
36
37
38
39
40
41
/**
 * struct vmw_user_buffer_object - A user-space-visible buffer object
 *
 * @prime: The TTM prime object providing user-space visibility
 * (handle lookup / export).
 * @vbo: The embedded struct vmw_buffer_object.
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};
46
47
48
49
50
51
52
53
54
55
/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}
61
62
63
64
65
66
67
68
69
70
71static struct vmw_user_buffer_object *
72vmw_user_buffer_object(struct ttm_buffer_object *bo)
73{
74 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
75
76 return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
77}
78
79
80
81
82
83
84
85
86
87
88
89
/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 *
 * Takes the reservation_sem in write mode and flushes/unpins the query bo
 * to avoid failures.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/*
	 * Already pinned: don't move it, only check that the current
	 * placement is compatible with the requested one.
	 */
	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 *
 * Takes the reservation_sem in write mode and flushes/unpins the query bo
 * to avoid failures.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/* Already pinned: only verify the current placement is acceptable. */
	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	/* Try VRAM-or-GMR first; fall back to VRAM unless interrupted. */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
180
181
182
183
184
185
186
187
188
189
190
191
192
193
/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 *
 * Thin wrapper around vmw_bo_pin_in_placement() with the VRAM placement.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
201
202
203
204
205
206
207
208
209
210
211
212
213
214
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 *
 * Takes the reservation_sem in write mode and flushes/unpins the query bo
 * to avoid failures.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	/* Single VRAM placement constrained to the first num_pages pfns. */
	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in VRAM but not at the start of it?
	 * If so, evict it to system first, since a direct validate to the
	 * constrained placement could fail or shuffle other buffers.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	/* Already pinned: only verify; otherwise move into place. */
	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram. */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
272
273
274
275
276
277
278
279
280
281
282
283
284
/**
 * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 *
 * Takes the reservation_sem in read mode.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
308
309
310
311
312
313
314
315
316void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
317 SVGAGuestPtr *ptr)
318{
319 if (bo->mem.mem_type == TTM_PL_VRAM) {
320 ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
321 ptr->offset = bo->offset;
322 } else {
323 ptr->gmrId = bo->mem.start;
324 ptr->offset = 0;
325 }
326}
327
328
329
330
331
332
333
334
335
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);

	/* Only the first pin / the last unpin actually changes anything. */
	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	/*
	 * Revalidate in place: accept every memory type the buffer may
	 * currently occupy, toggling only the NO_EVICT flag.
	 */
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	/* This should never fail or move the buffer. */
	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
389{
390 struct ttm_buffer_object *bo = &vbo->base;
391 bool not_used;
392 void *virtual;
393 int ret;
394
395 virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used);
396 if (virtual)
397 return virtual;
398
399 ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
400 if (ret)
401 DRM_ERROR("Buffer object map failed: %d.\n", ret);
402
403 return ttm_kmap_obj_virtual(&vbo->map, ¬_used);
404}
405
406
407
408
409
410
411
412
413
414
415void vmw_bo_unmap(struct vmw_buffer_object *vbo)
416{
417 if (vbo->map.bo == NULL)
418 return;
419
420 ttm_bo_kunmap(&vbo->map);
421}
422
423
424
425
426
427
428
429
430
431static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
432 bool user)
433{
434 static size_t struct_size, user_struct_size;
435 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
436 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
437
438 if (unlikely(struct_size == 0)) {
439 size_t backend_size = ttm_round_pot(vmw_tt_size);
440
441 struct_size = backend_size +
442 ttm_round_pot(sizeof(struct vmw_buffer_object));
443 user_struct_size = backend_size +
444 ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
445 TTM_OBJ_EXTRA_SIZE;
446 }
447
448 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
449 page_array_size +=
450 ttm_round_pot(num_pages * sizeof(dma_addr_t));
451
452 return ((user) ? user_struct_size : struct_size) +
453 page_array_size;
454}
455
456
457
458
459
460
461
/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vbo = vmw_buffer_object(bo);

	/* Drop any cached kernel map before freeing. */
	vmw_bo_unmap(vbo);
	kfree(vbo);
}
469
470
471
472
473
474
475
476static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
477{
478 struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
479
480 vmw_bo_unmap(&vmw_user_bo->vbo);
481 ttm_prime_object_kfree(vmw_user_bo, prime);
482}
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498int vmw_bo_init(struct vmw_private *dev_priv,
499 struct vmw_buffer_object *vmw_bo,
500 size_t size, struct ttm_placement *placement,
501 bool interruptible,
502 void (*bo_free)(struct ttm_buffer_object *bo))
503{
504 struct ttm_bo_device *bdev = &dev_priv->bdev;
505 size_t acc_size;
506 int ret;
507 bool user = (bo_free == &vmw_user_bo_destroy);
508
509 WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
510
511 acc_size = vmw_bo_acc_size(dev_priv, size, user);
512 memset(vmw_bo, 0, sizeof(*vmw_bo));
513
514 INIT_LIST_HEAD(&vmw_bo->res_list);
515
516 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
517 ttm_bo_type_device, placement,
518 0, interruptible, acc_size,
519 NULL, NULL, bo_free);
520 return ret;
521}
522
523
524
525
526
527
528
529
530
531
532
533static void vmw_user_bo_release(struct ttm_base_object **p_base)
534{
535 struct vmw_user_buffer_object *vmw_user_bo;
536 struct ttm_base_object *base = *p_base;
537 struct ttm_buffer_object *bo;
538
539 *p_base = NULL;
540
541 if (unlikely(base == NULL))
542 return;
543
544 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
545 prime.base);
546 bo = &vmw_user_bo->vbo.base;
547 ttm_bo_unref(&bo);
548}
549
550
551
552
553
554
555
556
557
558
559
560
561static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
562 enum ttm_ref_type ref_type)
563{
564 struct vmw_user_buffer_object *user_bo;
565
566 user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
567
568 switch (ref_type) {
569 case TTM_REF_SYNCCPU_WRITE:
570 ttm_bo_synccpu_write_release(&user_bo->vbo.base);
571 break;
572 default:
573 WARN_ONCE(true, "Undefined buffer object reference release.\n");
574 }
575}
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: If non-NULL, receives an extra reference to the prime base object.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/* On failure, vmw_bo_init() frees @user_bo via the destructor. */
	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		/*
		 * NOTE(review): only the extra reference taken just above is
		 * dropped here; the initial reference from vmw_bo_init()
		 * appears to remain on this error path - possible object
		 * leak. Confirm against ttm_prime_object_init() semantics.
		 */
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		/* Hand the caller an extra base-object reference. */
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
640
641
642
643
644
645
646
647
648
649int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
650 struct ttm_object_file *tfile)
651{
652 struct vmw_user_buffer_object *vmw_user_bo;
653
654 if (unlikely(bo->destroy != vmw_user_bo_destroy))
655 return -EPERM;
656
657 vmw_user_bo = vmw_user_buffer_object(bo);
658
659
660 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
661 return 0;
662
663 DRM_ERROR("Could not grant buffer access.\n");
664 return -EPERM;
665}
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		/*
		 * Command submission remains allowed: only wait for GPU
		 * idle, no TTM_REF_SYNCCPU_WRITE reference is taken.
		 */
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		lret = reservation_object_wait_timeout_rcu
			(bo->resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Register the grab as a ref object so it is automatically
	 * released when @tfile is closed; if one already existed, drop
	 * the duplicate write grab taken above.
	 */
	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->vbo.base);

	return ret;
}
717
718
719
720
721
722
723
724
725
726static int vmw_user_bo_synccpu_release(uint32_t handle,
727 struct ttm_object_file *tfile,
728 uint32_t flags)
729{
730 if (!(flags & drm_vmw_synccpu_allow_cs))
731 return ttm_ref_object_base_unref(tfile, handle,
732 TTM_REF_SYNCCPU_WRITE);
733
734 return 0;
735}
736
737
738
739
740
741
742
743
744
745
746
747
748
749
/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	/* Require at least one of read/write, and no unknown flag bits. */
	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		/* -ERESTARTSYS / -EBUSY are expected outcomes, not errors. */
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
819 struct drm_file *file_priv)
820{
821 struct vmw_private *dev_priv = vmw_priv(dev);
822 union drm_vmw_alloc_dmabuf_arg *arg =
823 (union drm_vmw_alloc_dmabuf_arg *)data;
824 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
825 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
826 struct vmw_buffer_object *vbo;
827 uint32_t handle;
828 int ret;
829
830 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
831 if (unlikely(ret != 0))
832 return ret;
833
834 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
835 req->size, false, &handle, &vbo,
836 NULL);
837 if (unlikely(ret != 0))
838 goto out_no_bo;
839
840 rep->handle = handle;
841 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
842 rep->cur_gmr_id = handle;
843 rep->cur_gmr_offset = 0;
844
845 vmw_bo_unreference(&vbo);
846
847out_no_bo:
848 ttm_read_unlock(&dev_priv->reservation_sem);
849
850 return ret;
851}
852
853
854
855
856
857
858
859
860
861
862
863
864
865int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
866 struct drm_file *file_priv)
867{
868 struct drm_vmw_unref_dmabuf_arg *arg =
869 (struct drm_vmw_unref_dmabuf_arg *)data;
870
871 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
872 arg->handle,
873 TTM_REF_USAGE);
874}
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891int vmw_user_bo_lookup(struct ttm_object_file *tfile,
892 uint32_t handle, struct vmw_buffer_object **out,
893 struct ttm_base_object **p_base)
894{
895 struct vmw_user_buffer_object *vmw_user_bo;
896 struct ttm_base_object *base;
897
898 base = ttm_base_object_lookup(tfile, handle);
899 if (unlikely(base == NULL)) {
900 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
901 (unsigned long)handle);
902 return -ESRCH;
903 }
904
905 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
906 ttm_base_object_unref(&base);
907 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
908 (unsigned long)handle);
909 return -EINVAL;
910 }
911
912 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
913 prime.base);
914 (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
915 if (p_base)
916 *p_base = base;
917 else
918 ttm_base_object_unref(&base);
919 *out = &vmw_user_bo->vbo;
920
921 return 0;
922}
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941struct vmw_buffer_object *
942vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
943{
944 struct vmw_user_buffer_object *vmw_user_bo;
945 struct ttm_base_object *base;
946
947 base = ttm_base_object_noref_lookup(tfile, handle);
948 if (!base) {
949 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
950 (unsigned long)handle);
951 return ERR_PTR(-ESRCH);
952 }
953
954 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
955 ttm_base_object_noref_release();
956 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
957 (unsigned long)handle);
958 return ERR_PTR(-EINVAL);
959 }
960
961 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
962 prime.base);
963 return &vmw_user_bo->vbo;
964}
965
966
967
968
969
970
971
972
973
974int vmw_user_bo_reference(struct ttm_object_file *tfile,
975 struct vmw_buffer_object *vbo,
976 uint32_t *handle)
977{
978 struct vmw_user_buffer_object *user_bo;
979
980 if (vbo->base.destroy != vmw_user_bo_destroy)
981 return -EINVAL;
982
983 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
984
985 *handle = user_bo->prime.base.handle;
986 return ttm_ref_object_add(tfile, &user_bo->prime.base,
987 TTM_REF_USAGE, NULL, false);
988}
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1004 struct vmw_fence_obj *fence)
1005{
1006 struct ttm_bo_device *bdev = bo->bdev;
1007
1008 struct vmw_private *dev_priv =
1009 container_of(bdev, struct vmw_private, bdev);
1010
1011 if (fence == NULL) {
1012 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1013 reservation_object_add_excl_fence(bo->resv, &fence->base);
1014 dma_fence_put(&fence->base);
1015 } else
1016 reservation_object_add_excl_fence(bo->resv, &fence->base);
1017}
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032int vmw_dumb_create(struct drm_file *file_priv,
1033 struct drm_device *dev,
1034 struct drm_mode_create_dumb *args)
1035{
1036 struct vmw_private *dev_priv = vmw_priv(dev);
1037 struct vmw_buffer_object *vbo;
1038 int ret;
1039
1040 args->pitch = args->width * ((args->bpp + 7) / 8);
1041 args->size = args->pitch * args->height;
1042
1043 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1044 if (unlikely(ret != 0))
1045 return ret;
1046
1047 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1048 args->size, false, &args->handle,
1049 &vbo, NULL);
1050 if (unlikely(ret != 0))
1051 goto out_no_bo;
1052
1053 vmw_bo_unreference(&vbo);
1054out_no_bo:
1055 ttm_read_unlock(&dev_priv->reservation_sem);
1056 return ret;
1057}
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071int vmw_dumb_map_offset(struct drm_file *file_priv,
1072 struct drm_device *dev, uint32_t handle,
1073 uint64_t *offset)
1074{
1075 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1076 struct vmw_buffer_object *out_buf;
1077 int ret;
1078
1079 ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1080 if (ret != 0)
1081 return -EINVAL;
1082
1083 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1084 vmw_bo_unreference(&out_buf);
1085 return 0;
1086}
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099int vmw_dumb_destroy(struct drm_file *file_priv,
1100 struct drm_device *dev,
1101 uint32_t handle)
1102{
1103 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1104 handle, TTM_REF_USAGE);
1105}
1106
1107
1108
1109
1110
1111
1112
1113void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1114{
1115
1116 if (bo->destroy != vmw_bo_bo_free &&
1117 bo->destroy != vmw_user_bo_destroy)
1118 return;
1119
1120
1121 vmw_bo_unmap(vmw_buffer_object(bo));
1122}
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *vbo;

	/* @mem is not always populated; nothing to do without it. */
	if (mem == NULL)
		return;

	/* Only act on vmw buffer objects. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel map on a move to or from VRAM; for other
	 * moves the backing pages stay the same and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * When moving out of MOB placement, unbind the buffer from any
	 * resources still bound to it.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}
1166