#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

/* Number of eviction errors tolerated before giving up. */
#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

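/**
 * vmw_resource_release_id - Release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Remove the resource id from the id manager (if assigned) and set it to -1.
 */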
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

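/*
 * vmw_resource_release - Final release callback for a resource kref.
 *
 * Unbinds and releases any backup buffer, destroys the hardware resource
 * and releases the resource id before freeing the resource itself.
 */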
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
                container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}

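/**
 * vmw_resource_alloc_id - Allocate a resource id for a resource.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free id from the resource id manager and store it in
 * @res->id. Returns 0 on success or a negative error code on failure.
 */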
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

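/**
 * vmw_resource_init - Initialize a struct vmw_resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res:      The resource to initialize.
 * @delay_id: Whether to defer device id allocation until first validation.
 * @res_free: Resource destructor, or NULL to use kfree().
 * @func:     Resource function table.
 */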
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

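/**
 * vmw_user_resource_lookup_handle - Look up a refcounted resource from a
 * user-space handle.
 *
 * @dev_priv:  Pointer to the device private structure.
 * @tfile:     TTM object file from which the handle was opened.
 * @handle:    The user-space handle.
 * @converter: Conversion functions for the expected resource type.
 * @p_res:     On success, assigned a pointer to the resource with an added
 *             reference.
 *
 * Returns 0 on success, -EINVAL if the handle does not resolve to a resource
 * of the expected type.
 */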
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

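/**
 * vmw_user_resource_noref_lookup_handle - Look up a resource from a
 * user-space handle without taking a reference.
 *
 * The returned pointer is only valid until ttm_base_object_noref_release()
 * is called, so the caller must not sleep or return to user-space before
 * releasing it.
 *
 * Returns the resource on success, or an ERR_PTR() on failure.
 */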
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                       struct ttm_object_file *tfile,
                                       uint32_t handle,
                                       const struct vmw_user_resource_conv
                                       *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}

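/*
 * vmw_user_lookup_handle - Look up a handle that may name either a surface
 * or a buffer object. On success exactly one of @out_surf and @out_buf is
 * set to the looked-up object.
 */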
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

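/*
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be performed
 *                 while interruptible.
 */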
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                          res->func->backup_placement,
                          interruptible,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

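/*
 * vmw_resource_do_validate - Make a resource up-to-date and visible to the
 * device.
 *
 * @res:     The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly backing the resource.
 *
 * A return value of -EBUSY indicates a device resource shortage; the caller
 * may evict resources of the same type and retry.
 */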
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

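        /*
         * Mark the resource as dirty on the device side so that a later
         * unbind reads its contents back to the backup buffer.
         */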
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

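/**
 * vmw_resource_unreserve - Unreserve a resource.
 *
 * @res:               The resource to unreserve.
 * @switch_backup:     Whether the backup buffer has been switched.
 * @new_backup:        Pointer to the new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Unreserving a resource puts it back on the device's resource lru list,
 * so that it can be evicted if necessary.
 */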
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

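/*
 * vmw_resource_check_buffer - Make sure a backup buffer exists for a
 * resource, and reserve and validate it.
 *
 * @ticket:        The ww acquire context used for reserving the buffer.
 * @res:           The resource for which to prepare a backup buffer.
 * @interruptible: Whether any sleeps should be performed interruptibly.
 * @val_buf:       On successful return, describes the reserved and validated
 *                 backup buffer.
 */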
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        ttm_bo_get(&res->backup->base);
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

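/**
 * vmw_resource_reserve - Reserve a resource for command submission.
 *
 * @res:           The resource to reserve.
 * @interruptible: Whether any sleeps should be performed interruptibly.
 * @no_backup:     If true, do not allocate a backup buffer even if the
 *                 resource type needs one.
 *
 * Removes the resource from the lru list and, unless @no_backup is set,
 * makes sure a backup buffer is present for resource types that need one.
 */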
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}

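/*
 * vmw_resource_backoff_reservation - Unreserve and unreference a backup
 * buffer previously set up by vmw_resource_check_buffer().
 *
 * @ticket:  The ww acquire context used for reserving the buffer.
 * @val_buf: Information about the buffer, as returned by
 *           vmw_resource_check_buffer().
 */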
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
}

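/*
 * vmw_resource_do_evict - Evict a resource from the device, unbinding it
 * from its backup buffer (if any) and destroying the hardware resource.
 *
 * @ticket:        The ww acquire context used for reserving the backup
 *                 buffer, or NULL.
 * @res:           The resource to evict.
 * @interruptible: Whether to perform waits interruptibly.
 */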
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}

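/**
 * vmw_resource_validate - Make a resource up-to-date and visible to the
 * device.
 *
 * @res:  The resource to make visible to the device.
 * @intr: Whether to perform waits interruptibly.
 *
 * On hardware resource shortage, this function repeatedly evicts resources
 * of the same type until validation succeeds or the eviction error limit
 * is reached.
 */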
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Only one backup buffer is reserved, so no ticket needed. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

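/**
 * vmw_resource_unbind_list - Unbind all resources bound to a buffer object.
 *
 * @vbo: Pointer to the buffer object.
 *
 * Unbinds all resources currently backed by @vbo and waits for the buffer
 * to become idle. The buffer object must be reserved by the caller.
 */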
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct vmw_resource *res, *next;
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .num_shared = 0
        };

        lockdep_assert_held(&vbo->base.resv->lock.base);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;

                (void) res->func->unbind(res, true, &val_buf);
                res->backup_dirty = true;
                res->res_dirty = false;
                list_del_init(&res->mob_head);
        }

        (void) ttm_bo_wait(&vbo->base, false, false);
}

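/**
 * vmw_query_readback_all - Read back cached query states.
 *
 * @dx_query_mob: Buffer containing the DX query MOB.
 *
 * Reads back cached query states from the device, if any are bound.
 * The caller is expected to hold binding_mutex.
 */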
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing. */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Detach the query MOB from its context. */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}

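/**
 * vmw_query_move_notify - Read back cached query states when a query MOB
 * buffer is moved.
 *
 * @bo:  The TTM buffer object about to move.
 * @mem: The memory region the buffer is moving to.
 *
 * Called before a query MOB is moved out of device (MOB) memory so that the
 * cached query states can be read back and fenced first.
 */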
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /*
         * If the buffer is moving from MOB to system memory, read back the
         * cached query states first.
         */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it. */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

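/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource to check.
 */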
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

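/*
 * vmw_resource_evict_type - Evict all evictable resources of a specific type
 * from the device.
 *
 * @dev_priv: Pointer to the device private structure.
 * @type:     The resource type to evict.
 */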
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

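/**
 * vmw_resource_evict_all - Evict all evictable resources from the device.
 *
 * @dev_priv: Pointer to the device private structure.
 */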
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

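/**
 * vmw_resource_pin - Add a pin reference on a resource.
 *
 * @res:           The resource to add a pin reference on.
 * @interruptible: Whether to perform waits interruptibly.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * As long as a pin reference is held, the resource can never be evicted and
 * its id never changes.
 */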
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Pin the backing buffer while the resource is pinned. */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

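/**
 * vmw_resource_unpin - Remove a pin reference from a resource.
 *
 * @res: The resource to remove a pin reference from.
 *
 * When the last pin reference is dropped, the backing buffer (if any) is
 * unpinned and the resource becomes evictable again.
 */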
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

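/**
 * vmw_res_type - Return the resource type of a resource.
 *
 * @res: Pointer to the resource.
 */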
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}