// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

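/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables.
 *
 * @uctx: The user context.
 *
 * Each cotable pointer is cleared under the cotable_lock so that concurrent
 * lookups see either a valid resource or NULL; the reference itself is
 * dropped outside the spinlock.
 */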
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

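/**
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 *
 * @res: The context resource.
 *
 * For guest-backed and DX contexts this tears down the command buffer
 * resource manager and all bindings before calling the resource's destroy
 * function. For legacy contexts it emits a SVGA_3D_CMD_CONTEXT_DESTROY
 * command directly.
 */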
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

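/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to kfree() @res.
 *
 * Sets up the backup size, command buffer resource manager, binding state
 * and, for DX contexts, the per-context cotables.
 */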
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
				 SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

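/**
 * vmw_context_init - Initialize a context resource, legacy or guest-backed.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to kfree() @res.
 * @dx: Whether a DX context is requested.
 *
 * Dispatches to vmw_gb_context_init() on MOB-capable hardware; otherwise
 * defines a legacy context directly in the FIFO.
 */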
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}


/*
 * GB context.
 */

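/**
 * vmw_gb_context_create - Allocate a device id and define a guest-backed
 * context on the device.
 *
 * @res: The context resource.
 */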
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

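/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: The validated backup buffer; must reside in MOB memory.
 */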
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

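/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB,
 * optionally reading back the context contents first.
 *
 * @res: The context resource.
 * @readback: Whether to issue a readback before unbinding.
 * @val_buf: The validated backup buffer.
 */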
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

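/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device and
 * release its device id.
 *
 * @res: The context resource.
 */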
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

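/**
 * vmw_dx_context_create - Allocate a device id and define a DX context on
 * the device.
 *
 * @res: The context resource.
 */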
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

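/**
 * vmw_dx_context_bind - Bind a DX context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: The validated backup buffer; must reside in MOB memory.
 */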
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the otable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents of an offline cotable. Note that the device is not
 * synced, so the cotable might still be being used by the hardware
 * at the time of the call.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

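/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup MOB, scrubbing
 * cotables and optionally reading back context and query state first.
 *
 * @res: The context resource.
 * @readback: Whether to issue readbacks before unbinding.
 * @val_buf: The validated backup buffer.
 */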
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

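/**
 * vmw_dx_context_destroy - Destroy a DX context on the device and release
 * its device id.
 *
 * @res: The context resource.
 */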
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

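/**
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * @res: The context resource embedded in a struct vmw_user_context.
 *
 * Frees the binding state, drops any DX query MOB association, frees the
 * base object and returns the accounted size to the global memory pool.
 */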
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

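/**
 * vmw_context_destroy_ioctl - Ioctl dropping the user-space reference on a
 * context handle.
 *
 * @dev: The drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: The calling file.
 */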
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

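/**
 * vmw_context_define - Common implementation for the context define ioctls.
 *
 * @dev: The drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: The calling file.
 * @dx: Whether to create a DX context.
 *
 * Accounts the object size against the graphics memory quota, allocates and
 * initializes the context resource, and associates it with a user-space
 * visible base object whose handle is returned in the ioctl argument.
 */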
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	if (!dev_priv->has_dx && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
			((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   &ttm_opt_ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

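/**
 * vmw_context_res_man - Return the command buffer resource manager of a
 * context.
 *
 * @ctx: The context resource.
 */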
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

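/**
 * vmw_context_cotable - Return the cotable of a given type for a context.
 *
 * @ctx: The context resource.
 * @cotable_type: The SVGACOTableType identifying the cotable.
 *
 * Returns ERR_PTR(-EINVAL) if @cotable_type is out of range.
 */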
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context.  If @mob is NULL, then this function will
 * remove the association between the MOB and the context.  This function
 * also notifies the dx_query_mob that it is detached from the context.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}