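/*
 * Buffer-object management for the vmwgfx driver: pin/unpin helpers,
 * cached kernel maps, user-visible (prime) buffer objects and their
 * ioctls, and the TTM move/swapout notification callbacks.
 */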
#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "ttm_object.h"
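/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object.
 */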
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};
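/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */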
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}
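/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the
 * TTM buffer object.
 */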
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}
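/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */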
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}
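/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */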
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
                goto out_unreserve;
        }

        /* Try VRAM-or-GMR first; on failure, fall back to VRAM only. */
        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}
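/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */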
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}
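/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */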
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
            buf->pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}
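/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */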
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
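/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */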
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}
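/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */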
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        lockdep_assert_held(&bo->resv->lock.base);

        if (pin) {
                if (vbo->pin_count++ > 0)
                        return;
        } else {
                WARN_ON(vbo->pin_count <= 0);
                if (--vbo->pin_count > 0)
                        return;
        }

        /*
         * Validate against a single placement that allows all memory types
         * the buffer may currently reside in, so only the NO_EVICT flag
         * changes and the buffer never actually moves.
         */
        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl.flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
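/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map.
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move (to or from VRAM)
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */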
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
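/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */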
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}
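/**
 * vmw_bo_acc_size - Calculate the memory accounting size of a buffer
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */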
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        /* Compute the constant per-object overheads once. */
        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
                        TTM_OBJ_EXTRA_SIZE;
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}
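/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */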
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}
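/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */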
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

        vmw_bo_unmap(&vmw_user_bo->vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}
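/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct.
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */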
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        /* The destructor must be one of the two destructors in this file. */
        WARN_ON_ONCE(!bo_free || (!user && bo_free != vmw_bo_bo_free));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}
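/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */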
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_put(&vmw_user_bo->vbo.base);
}
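/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object.
 * @ref_type: Reference type of the reference reaching this point.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */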
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}
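/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * assigned, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */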
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        /* The base object owns a reference, put in vmw_user_bo_release(). */
        ttm_bo_get(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_put(&user_bo->vbo.base);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.handle;

out_no_base_object:
        return ret;
}
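/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed.
 * @tfile: Identifying the caller.
 * Return: Zero if the caller may access the buffer, -EPERM otherwise.
 */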
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}
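/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */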
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                /* Only wait for GPU idle; don't block command submission. */
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu
                        (bo->resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        /*
         * Hand the grab over to a ref object; if adding the ref failed or
         * one already existed, release the grab we just took.
         */
        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);

        return ret;
}
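/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */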
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}
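/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */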
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}
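/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */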
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
                (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}
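/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */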
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
                (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}
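/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer are
 * refcounted.
 */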
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_get(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}
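/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * reference counting the base object.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_buffer_object and returns a
 * pointer to the struct vmw_buffer_object it derives from without
 * refcounting the pointer. The returned pointer is only valid until
 * ttm_base_object_noref_release() is called, and the object pointed to by
 * the returned pointer may be doomed.
 *
 * Return: A struct vmw_buffer_object pointer if successful or a negative
 * error pointer on failure.
 */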
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_noref_release();
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        return &vmw_user_bo->vbo;
}
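/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */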
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}
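/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */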
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;

        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else {
                reservation_object_add_excl_fence(bo->resv, &fence->base);
        }
}
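/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */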
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        /* Round bpp up to a whole number of bytes per pixel. */
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
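/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */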
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}
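/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */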
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}
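/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */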
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}
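/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */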
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *vbo;

        if (mem == NULL)
                return;

        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure
         * we read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}