1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <drm/ttm/ttm_placement.h>
30
31#include "vmwgfx_drv.h"
32#include "ttm_object.h"
33
34
35
36
37
38
39
40
/**
 * struct vmw_user_buffer_object - A vmw buffer object with a user-space
 * visible TTM prime object attached.
 *
 * @prime: TTM prime object providing the user-space handle, refcounting
 * and optional dma-buf export for the buffer.
 * @vbo: The embedded vmw buffer object.
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};
45
46
47
48
49
50
51
52
53
54
/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}
60
61
62
63
64
65
66
67
68
69
70static struct vmw_user_buffer_object *
71vmw_user_buffer_object(struct ttm_buffer_object *bo)
72{
73 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
74
75 return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
76}
77
78
79
80
81
82
83
84
85
86
87
88
89int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
90 struct vmw_buffer_object *buf,
91 struct ttm_placement *placement,
92 bool interruptible)
93{
94 struct ttm_operation_ctx ctx = {interruptible, false };
95 struct ttm_buffer_object *bo = &buf->base;
96 int ret;
97 uint32_t new_flags;
98
99 vmw_execbuf_release_pinned_bo(dev_priv);
100
101 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
102 if (unlikely(ret != 0))
103 goto err;
104
105 if (buf->base.pin_count > 0)
106 ret = ttm_bo_mem_compat(placement, bo->resource,
107 &new_flags) == true ? 0 : -EINVAL;
108 else
109 ret = ttm_bo_validate(bo, placement, &ctx);
110
111 if (!ret)
112 vmw_bo_pin_reserved(buf, true);
113
114 ttm_bo_unreserve(bo);
115err:
116 return ret;
117}
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
133 struct vmw_buffer_object *buf,
134 bool interruptible)
135{
136 struct ttm_operation_ctx ctx = {interruptible, false };
137 struct ttm_buffer_object *bo = &buf->base;
138 int ret;
139 uint32_t new_flags;
140
141 vmw_execbuf_release_pinned_bo(dev_priv);
142
143 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
144 if (unlikely(ret != 0))
145 goto err;
146
147 if (buf->base.pin_count > 0) {
148 ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
149 &new_flags) == true ? 0 : -EINVAL;
150 goto out_unreserve;
151 }
152
153 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
154 if (likely(ret == 0) || ret == -ERESTARTSYS)
155 goto out_unreserve;
156
157 ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
158
159out_unreserve:
160 if (!ret)
161 vmw_bo_pin_reserved(buf, true);
162
163 ttm_bo_unreserve(bo);
164err:
165 return ret;
166}
167
168
169
170
171
172
173
174
175
176
177
178
179
180
/**
 * vmw_bo_pin_in_vram - Move a buffer to vram and pin it.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
203 struct vmw_buffer_object *buf,
204 bool interruptible)
205{
206 struct ttm_operation_ctx ctx = {interruptible, false };
207 struct ttm_buffer_object *bo = &buf->base;
208 struct ttm_placement placement;
209 struct ttm_place place;
210 int ret = 0;
211 uint32_t new_flags;
212
213 place = vmw_vram_placement.placement[0];
214 place.lpfn = bo->resource->num_pages;
215 placement.num_placement = 1;
216 placement.placement = &place;
217 placement.num_busy_placement = 1;
218 placement.busy_placement = &place;
219
220 vmw_execbuf_release_pinned_bo(dev_priv);
221 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
222 if (unlikely(ret != 0))
223 goto err_unlock;
224
225
226
227
228
229
230 if (bo->resource->mem_type == TTM_PL_VRAM &&
231 bo->resource->start < bo->resource->num_pages &&
232 bo->resource->start > 0 &&
233 buf->base.pin_count == 0) {
234 ctx.interruptible = false;
235 (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
236 }
237
238 if (buf->base.pin_count > 0)
239 ret = ttm_bo_mem_compat(&placement, bo->resource,
240 &new_flags) == true ? 0 : -EINVAL;
241 else
242 ret = ttm_bo_validate(bo, &placement, &ctx);
243
244
245 WARN_ON(ret == 0 && bo->resource->start != 0);
246 if (!ret)
247 vmw_bo_pin_reserved(buf, true);
248
249 ttm_bo_unreserve(bo);
250err_unlock:
251
252 return ret;
253}
254
255
256
257
258
259
260
261
262
263
264
265
266
267int vmw_bo_unpin(struct vmw_private *dev_priv,
268 struct vmw_buffer_object *buf,
269 bool interruptible)
270{
271 struct ttm_buffer_object *bo = &buf->base;
272 int ret;
273
274 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
275 if (unlikely(ret != 0))
276 goto err;
277
278 vmw_bo_pin_reserved(buf, false);
279
280 ttm_bo_unreserve(bo);
281
282err:
283 return ret;
284}
285
286
287
288
289
290
291
292
293void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
294 SVGAGuestPtr *ptr)
295{
296 if (bo->resource->mem_type == TTM_PL_VRAM) {
297 ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
298 ptr->offset = bo->resource->start << PAGE_SHIFT;
299 } else {
300 ptr->gmrId = bo->resource->start;
301 ptr->offset = 0;
302 }
303}
304
305
306
307
308
309
310
311
312
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/* Nothing to do if the buffer already has the desired pin state. */
	if (pin == !!bo->pin_count)
		return;

	/*
	 * Re-validate the buffer against a single placement describing its
	 * current memory type; this must not move the buffer.
	 */
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	/* The validation above must neither fail nor change the memory type. */
	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
362{
363 struct ttm_buffer_object *bo = &vbo->base;
364 bool not_used;
365 void *virtual;
366 int ret;
367
368 virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used);
369 if (virtual)
370 return virtual;
371
372 ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
373 if (ret)
374 DRM_ERROR("Buffer object map failed: %d.\n", ret);
375
376 return ttm_kmap_obj_virtual(&vbo->map, ¬_used);
377}
378
379
380
381
382
383
384
385
386
387
388void vmw_bo_unmap(struct vmw_buffer_object *vbo)
389{
390 if (vbo->map.bo == NULL)
391 return;
392
393 ttm_bo_kunmap(&vbo->map);
394}
395
396
397
398
399
400
401
402
403
/**
 * vmw_bo_acc_size - Compute the TTM memory-accounting size for a buffer
 * object of a given byte size.
 *
 * @dev_priv: Driver private.
 * @size: Buffer object size in bytes.
 * @user: Whether this is an ordinary (kernel) or user-visible buffer object.
 *
 * Return: Accounting size covering the containing struct, the backend,
 * the page pointer array and, for coherent DMA, the dma_addr_t array.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	/*
	 * The struct sizes are identical for every call, so they are
	 * computed lazily on first use and cached in function-local statics.
	 * NOTE(review): the lazy init is not serialized; presumably a
	 * duplicated computation by racing callers is harmless since both
	 * compute the same values — verify.
	 */
	static size_t struct_size, user_struct_size;
	size_t num_pages = PFN_UP(size);
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	/* Coherent DMA additionally needs one dma_addr_t per page. */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
428
429
430
431
432
433
434
/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	/* The buffer must no longer be dirty-tracked nor back any resource. */
	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	dma_resv_fini(&bo->base._resv);
	kfree(vmw_bo);
}
445
446
447
448
449
450
451
/**
 * vmw_user_bo_destroy - vmw user-visible buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	/* The buffer must no longer be dirty-tracked nor back any resource. */
	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	/* Frees the containing struct through the prime object. */
	ttm_prime_object_kfree(vmw_user_bo, prime);
}
462
463
464
465
466
467
468
469
470
471
472
/**
 * vmw_bo_create_kernel - Create a pinned kernel-internal BO
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO, pinned and reserved-then-unreserved
 *
 * Creates and pins a buffer object for kernel-internal use, e.g. a
 * framebuffer or a scratch area.
 *
 * Return: Zero on success, negative error code on failure.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	/* Accounting size: BO struct + page pointer array + TT backend. */
	acc_size = ttm_round_pot(sizeof(*bo));
	acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
	acc_size += ttm_round_pot(sizeof(struct ttm_tt));

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		goto error_free;


	bo->base.size = size;
	dma_resv_init(&bo->base._resv);
	drm_vma_node_reset(&bo->base.vma_node);

	/* On success the BO is returned reserved. */
	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_device, placement, 0,
				   &ctx, NULL, NULL, NULL);
	if (unlikely(ret))
		goto error_account;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_account:
	ttm_mem_global_free(&ttm_mem_glob, acc_size);

error_free:
	kfree(bo);
	return ret;
}
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 *
 * NOTE(review): on ttm_bo_init_reserved() failure TTM presumably invokes
 * @bo_free to destroy the object, whereas on the earlier accounting failure
 * this function returns with @vmw_bo untouched and still owned by the
 * caller — verify that all callers handle both cases.
 *
 * Return: Zero on success, negative error code on error.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	/* A destructor must be supplied. */
	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		return ret;

	vmw_bo->base.base.size = size;
	dma_resv_init(&vmw_bo->base.base._resv);
	drm_vma_node_reset(&vmw_bo->base.base.vma_node);

	/* On success the BO is returned reserved. */
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device, placement,
				   0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret)) {
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		return ret;
	}

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);
	return 0;
}
574
575
576
577
578
579
580
581
582
583
584
585static void vmw_user_bo_release(struct ttm_base_object **p_base)
586{
587 struct vmw_user_buffer_object *vmw_user_bo;
588 struct ttm_base_object *base = *p_base;
589
590 *p_base = NULL;
591
592 if (unlikely(base == NULL))
593 return;
594
595 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
596 prime.base);
597 ttm_bo_put(&vmw_user_bo->vbo.base);
598}
599
600
601
602
603
604
605
606
607
608
609
610
611static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
612 enum ttm_ref_type ref_type)
613{
614 struct vmw_user_buffer_object *user_bo;
615
616 user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
617
618 switch (ref_type) {
619 case TTM_REF_SYNCCPU_WRITE:
620 atomic_dec(&user_bo->vbo.cpu_writers);
621 break;
622 default:
623 WARN_ONCE(true, "Undefined buffer object reference release.\n");
624 }
625}
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
/**
 * vmw_user_bo_alloc - Allocate a user-space visible buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @tfile: Pointer to the identifying the caller.
 * @size: Requested buffer object size in bytes.
 * @shareable: Whether the buffer object should be shareable between
 * processes.
 * @handle: Pointer to where the resulting handle is placed.
 * @p_vbo: Pointer to where the resulting buffer object is placed.
 * @p_base: If non-NULL, receives an extra reference to the base object.
 *
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/* Prefer MOB memory when the device supports it. */
	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true, false,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		/*
		 * NOTE(review): whether user_bo has been freed here depends
		 * on which vmw_bo_init() failure path was taken — verify
		 * the early accounting-failure path doesn't leak user_bo.
		 */
		return ret;

	/* Hold one BO reference on behalf of the prime/base object. */
	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
690
691
692
693
694
695
696
697
698
699int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
700 struct ttm_object_file *tfile)
701{
702 struct vmw_user_buffer_object *vmw_user_bo;
703
704 if (unlikely(bo->destroy != vmw_user_bo_destroy))
705 return -EPERM;
706
707 vmw_user_bo = vmw_user_buffer_object(bo);
708
709
710 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
711 return 0;
712
713 DRM_ERROR("Could not grant buffer access.\n");
714 return -EPERM;
715}
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	/*
	 * If command submission is still allowed, only wait for idle;
	 * no writer accounting or reference object is needed.
	 */
	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
					     nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* Block further command submission by bumping cpu_writers. */
	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Track the grab with a ref object; undo the writer bump if the
	 * ref add failed or the caller already holds a synccpu grab.
	 */
	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}
774
775
776
777
778
779
780
781
782
783static int vmw_user_bo_synccpu_release(uint32_t handle,
784 struct ttm_object_file *tfile,
785 uint32_t flags)
786{
787 if (!(flags & drm_vmw_synccpu_allow_cs))
788 return ttm_ref_object_base_unref(tfile, handle,
789 TTM_REF_SYNCCPU_WRITE);
790
791 return 0;
792}
793
794
795
796
797
798
799
800
801
802
803
804
805
806
/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and dispatches to
 * the relevant synccpu function (grab or release).
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	/* At least one of read/write must be set, and no unknown flags. */
	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		/* Drop the lookup references regardless of grab outcome. */
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		/* -ERESTARTSYS and -EBUSY are expected and not logged. */
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
876 struct drm_file *file_priv)
877{
878 struct vmw_private *dev_priv = vmw_priv(dev);
879 union drm_vmw_alloc_dmabuf_arg *arg =
880 (union drm_vmw_alloc_dmabuf_arg *)data;
881 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
882 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
883 struct vmw_buffer_object *vbo;
884 uint32_t handle;
885 int ret;
886
887 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
888 req->size, false, &handle, &vbo,
889 NULL);
890 if (unlikely(ret != 0))
891 goto out_no_bo;
892
893 rep->handle = handle;
894 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
895 rep->cur_gmr_id = handle;
896 rep->cur_gmr_offset = 0;
897
898 vmw_bo_unreference(&vbo);
899
900out_no_bo:
901
902 return ret;
903}
904
905
906
907
908
909
910
911
912
913
914
915
916
917int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
918 struct drm_file *file_priv)
919{
920 struct drm_vmw_unref_dmabuf_arg *arg =
921 (struct drm_vmw_unref_dmabuf_arg *)data;
922
923 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
924 arg->handle,
925 TTM_REF_USAGE);
926}
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to a where a pointer to the buffer object should be
 * placed. The caller receives a BO reference.
 * @p_base: If non-NULL, receives a reference to the TTM base object;
 * otherwise the base reference obtained by the lookup is dropped.
 * Return: Zero on success, -ESRCH if the handle doesn't exist, -EINVAL if
 * it refers to a non-buffer object.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	/* The handle must actually refer to a buffer object. */
	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	/* Take a BO reference for the caller. */
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * taking a reference.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * NOTE(review): the returned pointer is only valid as long as the
 * noref lookup obtained via ttm_base_object_noref_lookup() is held —
 * presumably the caller must pair this with
 * ttm_base_object_noref_release(); verify against callers.
 *
 * Return: A pointer to the buffer object, or an ERR_PTR on failure:
 * -ESRCH if the handle doesn't exist, -EINVAL if it refers to a
 * non-buffer object.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	/* The handle must actually refer to a buffer object. */
	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026int vmw_user_bo_reference(struct ttm_object_file *tfile,
1027 struct vmw_buffer_object *vbo,
1028 uint32_t *handle)
1029{
1030 struct vmw_user_buffer_object *user_bo;
1031
1032 if (vbo->base.destroy != vmw_user_bo_destroy)
1033 return -EINVAL;
1034
1035 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
1036
1037 *handle = user_bo->prime.base.handle;
1038 return ttm_ref_object_add(tfile, &user_bo->prime.base,
1039 TTM_REF_USAGE, NULL, false);
1040}
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, a fence is obtained from the
 * command stream via vmw_execbuf_fence_commands(), attached to the
 * buffer, and its reference dropped; otherwise @fence is attached and
 * the caller's reference is left untouched.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		/* Drop the locally-obtained fence reference. */
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084int vmw_dumb_create(struct drm_file *file_priv,
1085 struct drm_device *dev,
1086 struct drm_mode_create_dumb *args)
1087{
1088 struct vmw_private *dev_priv = vmw_priv(dev);
1089 struct vmw_buffer_object *vbo;
1090 int ret;
1091
1092 args->pitch = args->width * ((args->bpp + 7) / 8);
1093 args->size = args->pitch * args->height;
1094
1095 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1096 args->size, false, &args->handle,
1097 &vbo, NULL);
1098 if (unlikely(ret != 0))
1099 goto out_no_bo;
1100
1101 vmw_bo_unreference(&vbo);
1102out_no_bo:
1103 return ret;
1104}
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118int vmw_dumb_map_offset(struct drm_file *file_priv,
1119 struct drm_device *dev, uint32_t handle,
1120 uint64_t *offset)
1121{
1122 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1123 struct vmw_buffer_object *out_buf;
1124 int ret;
1125
1126 ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1127 if (ret != 0)
1128 return -EINVAL;
1129
1130 *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1131 vmw_bo_unreference(&out_buf);
1132 return 0;
1133}
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146int vmw_dumb_destroy(struct drm_file *file_priv,
1147 struct drm_device *dev,
1148 uint32_t handle)
1149{
1150 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1151 handle, TTM_REF_USAGE);
1152}
1153
1154
1155
1156
1157
1158
1159
1160void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1161{
1162
1163 if (bo->destroy != vmw_bo_bo_free &&
1164 bo->destroy != vmw_user_bo_destroy)
1165 return;
1166
1167
1168 vmw_bo_unmap(vmw_buffer_object(bo));
1169}
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Only act on buffer objects owned by this driver. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps when the move involves VRAM in
	 * either direction.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, detach the
	 * resources backed by it.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}
1210