#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and negative on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res:      The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 *            the first validation.
 * @res_free: Resource destructor.
 * @func:     Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - activate a resource
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle.
 *
 * @dev_priv:  Pointer to a device private struct
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller
 * @handle:    The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res:     On successful return the location pointed to will contain
 *             a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dma buffer.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * assigned, or NULL if no such pointer is wanted.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf,
			  struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
					nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
					     &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf,
				    NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out,
			   struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */
	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");

		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb buffer for KMS
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure indicating the
 * requested dumb buffer size and returning the handle, pitch and size of
 * the underlying dma-buffer.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf, NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about the backup buffer.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Assume the resource contents are dirtied by the upcoming
	 * command submission, so that an eviction must first read them
	 * back to the backup buffer.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource
 *
 * @res:               The resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_dmabuf_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure that
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be reserved.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses MOB memory.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false);
	}
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_fence_single_bo(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}