#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"

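/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The TTM prime object providing user-space visibility.
 * @vbo: The embedded struct vmw_buffer_object.
 */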
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};

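/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding @bo.
 */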
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}

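/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding @bo.
 */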
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}

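/**
 * vmw_bo_pin_in_placement - Validate a buffer object into a placement and
 * pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @placement: The placement to validate and pin to.
 * @interruptible: Use interruptible waits.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */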
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

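/**
 * vmw_bo_pin_in_vram_or_gmr - Validate a buffer object into VRAM or GMR
 * memory and pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @interruptible: Use interruptible waits.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */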
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

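/**
 * vmw_bo_pin_in_vram - Validate a buffer object into VRAM and pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @interruptible: Use interruptible waits.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */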
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}

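/**
 * vmw_bo_pin_in_start_of_vram - Validate a buffer object to the start of
 * VRAM and pin it there.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to pin.
 * @interruptible: Use interruptible waits.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */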
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * If the buffer is already in VRAM but not at the start of it, and
	 * not pinned, evict it to system memory first so the validation
	 * below can place it at offset zero.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* On success the buffer is expected to sit at the start of VRAM. */
	WARN_ON(ret == 0 && bo->mem.start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

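/**
 * vmw_bo_unpin - Unpin a buffer object without moving it.
 *
 * @dev_priv: Driver private.
 * @buf: Buffer object to unpin.
 * @interruptible: Use interruptible waits.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */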
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

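/**
 * vmw_bo_get_guest_ptr - Get the guest pointer representing the current
 * placement of a buffer object.
 *
 * @bo: Pointer to the TTM buffer object.
 * @ptr: The SVGAGuestPtr receiving the result.
 */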
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->mem.start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}

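/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */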
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->mem.mem_type;
	pl.flags = bo->mem.placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

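/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map.
 * Return: A kernel virtual address, or NULL if mapping failed.
 *
 * Maps the buffer object into the kernel address space, or returns the
 * virtual address of an already cached map. The cached map is torn down on
 * buffer object move, swapout or destruction.
 */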
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

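/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map is torn down.
 *
 * Tears down a map set up using vmw_bo_map_and_cache().
 */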
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}

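/**
 * vmw_bo_acc_size - Compute the kernel memory accounting size of a buffer
 * object.
 *
 * @dev_priv: Driver private.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary or a user-space buffer object.
 * Return: The accounting size, including TTM backend and page-array overhead.
 */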
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

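/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */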
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}

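/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */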
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

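/**
 * vmw_bo_create_kernel - Create a pinned buffer object for kernel use.
 *
 * @dev_priv: Pointer to the device private.
 * @size: Size of the buffer object.
 * @placement: Where to place it.
 * @p_bo: Receives a pointer to the created, pinned buffer object.
 * Return: Zero on success, negative error code on failure.
 */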
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	acc_size = ttm_round_pot(sizeof(*bo));
	acc_size += ttm_round_pot(npages * sizeof(void *));
	acc_size += ttm_round_pot(sizeof(struct ttm_tt));
	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_device, placement, 0,
				   &ctx, acc_size, NULL, NULL, NULL);
	if (unlikely(ret))
		goto error_free;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_free:
	kfree(bo);
	return ret;
}

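/**
 * vmw_bo_init - Initialize a vmw buffer object.
 *
 * @dev_priv: Pointer to the device private.
 * @vmw_bo: Buffer object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: Whether the buffer object should be created pinned.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on failure.
 */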
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device, placement,
				   0, &ctx, acc_size, NULL, NULL, bo_free);
	if (unlikely(ret))
		return ret;

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);
	return 0;
}

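/**
 * vmw_user_bo_release - TTM base object release callback for vmw user
 * buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the base object pointer and drops the reference it held on the
 * underlying TTM buffer object.
 */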
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}

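/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object.
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a file-close cleanup.
 */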
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}

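/**
 * vmw_user_bo_alloc - Allocate a user buffer object.
 *
 * @dev_priv: Pointer to the device private.
 * @tfile: The TTM object file identifying the caller.
 * @size: Requested buffer object size.
 * @shareable: Whether the object may be shared between processes.
 * @handle: Receives the user-space handle.
 * @p_vbo: Receives a pointer to the embedded buffer object.
 * @p_base: If non-NULL, receives an extra reference on the base object.
 * Return: Zero on success, negative error code on failure.
 */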
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true, false,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}

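/**
 * vmw_user_bo_verify_access - Verify access permissions on this buffer
 * object.
 *
 * @bo: Pointer to the buffer object being accessed.
 * @tfile: Identifying the caller.
 * Return: Zero if access is granted, -EPERM otherwise.
 */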
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

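/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for CPU
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on failure. In particular,
 * -EBUSY if a dontblock operation is requested and the buffer object is busy,
 * and -ERESTARTSYS if interrupted by a signal.
 */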
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

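/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, and
 * unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */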
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

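/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on failure.
 *
 * Checks the ioctl arguments for validity and calls the relevant synccpu
 * function.
 */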
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

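/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on failure.
 *
 * Allocates a struct vmw_user_buffer_object and returns its handle and map
 * offset to user-space.
 */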
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

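/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on failure.
 *
 * Closes a handle to a TTM base object, optionally freeing the object.
 */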
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

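/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Receives a pointer to the embedded struct vmw_buffer_object.
 * @p_base: If non-NULL, receives a pointer to the TTM base object.
 * Return: Zero on success, negative error code on failure.
 *
 * Both the returned buffer object pointer and, if requested, the base object
 * pointer are refcounted.
 */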
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}

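/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * reference count bumps.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * Return: A pointer to the embedded struct vmw_buffer_object, or an error
 * pointer on failure.
 *
 * No references are taken; the caller must end the lookup critical section
 * with ttm_base_object_noref_release().
 */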
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}

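/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Receives the handle.
 * Return: Zero on success, negative error code on failure.
 */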
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

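/**
 * vmw_bo_fence_single - Fence a single TTM buffer object without unreserving
 * it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, a fence is inserted into the
 * command stream.
 *
 * Takes a single buffer object rather than a list, and does not unreserve
 * the buffer object, which must be done separately.
 */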
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}

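/**
 * vmw_dumb_create - Create a dumb buffer object.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure.
 * Return: Zero on success, negative error code on failure.
 *
 * Driver callback for the core drm create_dumb functionality. Computes the
 * pitch and size from width, height and bpp and allocates a user buffer
 * object of that size.
 */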
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

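/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: Receives the address space offset.
 * Return: Zero on success, negative error code on failure.
 *
 * Driver callback for the core drm dumb_map_offset functionality.
 */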
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}

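/**
 * vmw_dumb_destroy - Destroy a dumb buffer.
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * Driver callback for the core drm dumb_destroy functionality.
 */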
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

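/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object about to be swapped out.
 */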
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout. */
	vmw_bo_unmap(vmw_buffer_object(bo));
}

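/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating where the buffer is moving.
 *
 * Detaches cached maps and, if the buffer is leaving MOB memory, unbinds it
 * from any resources still referencing it.
 */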
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before a move to or from VRAM.
	 * For other move types the underlying pages stay the same and the
	 * map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If the buffer is leaving MOB placement, unbind it from any
	 * resources still using it as backup.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}