linux/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

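/**
 * struct vmw_user_context - Representation of a user-space context resource.
 *
 * @base: The TTM base object providing user-space visibility.
 * @res: The embedded struct vmw_resource.
 * @cbs: Context binding state tracker.
 * @man: Manager for command buffer managed resources registered on this
 * context.
 * @cotables: Cotable resources attached to a DX context.
 * @cotable_lock: Protects the @cotables array against concurrent clearing.
 * @dx_query_mob: Buffer object bound to this context for DX queries, if any.
 */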
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_MAX];
        spinlock_t cotable_lock;
        struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

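/**
 * vmw_context_cotables_unref - Drop a context's references on its cotables.
 *
 * @dev_priv: Pointer to the device private structure.
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under @uctx->cotable_lock and drops the
 * corresponding resource reference outside the lock.
 */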
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
                                       struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;
        u32 cotable_max = has_sm5_context(dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        for (i = 0; i < cotable_max; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}

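/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource.
 *
 * @res: Pointer to the context resource.
 *
 * Guest-backed and DX contexts are torn down through the resource type's
 * destroy callback, after destroying the command buffer resource manager
 * and killing all bindings. Legacy contexts are destroyed with a
 * SVGA_3D_CMD_CONTEXT_DESTROY device command.
 */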
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(dev_priv, uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return;

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}

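/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX rather than a guest-backed context.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets the backup buffer size, creates the command buffer resource manager
 * and the binding state tracker and, for DX contexts, allocates the
 * cotables.
 */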
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                u32 cotable_max = has_sm5_context(dev_priv) ?
                        SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
                for (i = 0; i < cotable_max; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (IS_ERR(uctx->cotables[i])) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_cotables:
        vmw_context_cotables_unref(dev_priv, uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

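/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether to create a DX context.
 *
 * On guest-backed devices this dispatches to vmw_gb_context_init().
 * Otherwise a legacy context id is allocated and the context is defined
 * with a SVGA_3D_CMD_CONTEXT_DEFINE device command.
 */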
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}


/*
 * GB context.
 */

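/**
 * vmw_gb_context_create - Allocate a device id for the context and define
 * the context on the device.
 *
 * @res: Pointer to the context resource.
 */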
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

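/**
 * vmw_gb_context_bind - Bind the context backup buffer to the device.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */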
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

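/**
 * vmw_gb_context_unbind - Unbind the context from its backup buffer.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context state back before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings, optionally issues a readback, and fences the backup
 * buffer so it isn't reused before the device is done with it.
 */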
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

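/**
 * vmw_gb_context_destroy - Destroy the device context and release its id.
 *
 * @res: Pointer to the context resource.
 */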
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

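/**
 * vmw_dx_context_create - Allocate a device id for the DX context and
 * define the context on the device.
 *
 * @res: Pointer to the context resource.
 */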
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

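/**
 * vmw_dx_context_bind - Bind the DX context backup buffer to the device.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */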
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < cotable_max; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}

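/**
 * vmw_dx_context_unbind - Unbind the DX context from its backup buffer.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context and query states back.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings and cotables, optionally reads back pending query
 * results and the context state, and fences the backup buffer.
 */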
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

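/**
 * vmw_dx_context_destroy - Destroy the DX device context and release its id.
 *
 * @res: Pointer to the context resource.
 */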
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release the base object's resource
 * reference.
 *
 * @p_base: Pointer to a pointer to the base object, which is set to NULL.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base object's reference on the resource
 * object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

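/**
 * vmw_context_define - Create a new user context along with its base object.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument; a struct drm_vmw_context_arg.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Backs both vmw_context_define_ioctl() and
 * vmw_extended_context_define_ioctl().
 */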
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;

        if (!has_sm4_context(dev_priv) && dx) {
                VMW_DEBUG_USER("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
                  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   &ttm_opt_ctx);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.handle;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}


int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        if (cotable_type >= cotable_max)
                return ERR_PTR(-EINVAL);

        return container_of(ctx, struct vmw_user_context, res)->
                cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context.  If @mob is NULL, then this function will
 * remove the association between the MOB and the context.  This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_buffer_object *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_bo_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}