linux/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 * @vmw_res_rel_max: Last value in the enum - used for build-time size
 * checking.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset, into the command buffer, of the id that needs
 * fixup.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Validation function for the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: Stringified name of the command, used in diagnostics.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable), #_cmd}

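/*
 * Example (an illustrative sketch, not part of the driver): a table entry
 * built with VMW_CMD_DEF, such as
 *
 *      VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *                  true, false, false)
 *
 * expands to the designated array initializer
 *
 *      [SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
 *              {&vmw_cmd_surface_copy_check, true, false, false,
 *               "SVGA_3D_CMD_SURFACE_COPY"},
 *
 * so the verifier can index the table directly by
 * (command id - SVGA_3D_CMD_BASE) and print the stringified command name
 * in diagnostics.
 */
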
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_resources_unreserve - Unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: If non-NULL on entry, points to a valid struct
 * vmw_resource_val_node pointer on successful return.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(!node)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it points to
 * to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it points
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If no DX context is set, the function
 * returns -EINVAL. Otherwise it returns 0 on success or a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list. */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(!rel)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}

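/*
 * Example (an illustrative sketch of the relocation life cycle): while a
 * command is parsed, the byte offset of a resource id within the command
 * stream is recorded, e.g.
 *
 *      ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *                                        vmw_ptr_diff(sw_context->buf_start,
 *                                                     id_loc),
 *                                        vmw_res_rel_normal);
 *
 * Once all resources have been validated and thus have final device ids,
 * vmw_resource_relocations_apply() runs once over the buffer that is
 * actually submitted, patching each recorded location with rel->res->id,
 * or with SVGA_3D_CMD_NOP as dictated by the relocation type.
 */
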
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

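/*
 * The reservation and validation helpers in this file are used in roughly
 * the following order per command batch (a sketch inferred from the
 * kernel-doc here, not a verbatim quote of the submission path):
 *
 *      ret = vmw_resources_reserve(sw_context);
 *      // validate all buffer objects on sw_context->validate_nodes
 *      ret = vmw_resources_validate(sw_context);
 *      // patch relocations, submit the command batch, emit a fence
 *      vmw_resources_unreserve(sw_context, false); // or true on failure
 */
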
/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        if (p_val)
                *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Invalid context id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */
        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         vmw_ptr_diff(sw_context->buf_start, id_loc),
                         vmw_res_rel_normal);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

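/*
 * Typical usage from a command verifier (a sketch mirroring the calls made
 * later in this file): translate the user-space surface id embedded in a
 * command and put the surface on the validation list in one call:
 *
 *      ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 *                              user_surface_converter,
 *                              &cmd->body.sid, NULL);
 *
 * Repeated references to the same handle hit the per-type res_cache entry
 * and only add another relocation, skipping the handle lookup.
 */
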
/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and whether another buffer is currently pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */
        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */
                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

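/*
 * Sketch of the pinned query buffer hand-over implemented by the two
 * functions above (assuming a successful submission):
 *
 *      vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *      // ... submit the command batch ...
 *      vmw_query_bo_switch_commit(dev_priv, sw_context);
 *      // ... emit a fence; both the old and the new query buffer must be
 *      // fenced with a sequence emitted after the commit ...
 *
 * The old query buffer is unpinned asynchronously; the fence guarantees it
 * is not moved until the dummy query barrier has executed.
 */
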
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

1386/**
1387 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1388 *
1389 * @dev_priv: Pointer to a device private struct.
1390 * @sw_context: The software context used for this command submission.
1391 * @header: Pointer to the command header in the command stream.
1392 *
1393 * This function adds the new query to the query COTABLE.
1394 */
1395static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1396                                   struct vmw_sw_context *sw_context,
1397                                   SVGA3dCmdHeader *header)
1398{
1399        struct vmw_dx_define_query_cmd {
1400                SVGA3dCmdHeader header;
1401                SVGA3dCmdDXDefineQuery q;
1402        } *cmd;
1403
1404        int ret;
1405        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1406        struct vmw_resource *cotable_res;
1407
1409        if (ctx_node == NULL) {
1410                DRM_ERROR("DX Context not set for query.\n");
1411                return -EINVAL;
1412        }
1413
1414        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1415
1416        if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1417            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1418                return -EINVAL;
1419
1420        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1421        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1422        vmw_resource_unreference(&cotable_res);
1423
1424        return ret;
1425}
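/*
 * The define pattern sketched above recurs for all DX define-style
 * commands: look up the context's cotable for the resource type with
 * vmw_context_cotable(), then call vmw_cotable_notify() with the new id so
 * the backing cotable can be grown during validation if the id does not
 * yet fit. A minimal sketch, using the query cotable as in the function
 * above:
 *
 *	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
 *	ret = vmw_cotable_notify(res, cmd->q.queryId);
 *	vmw_resource_unreference(&res);
 */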
1426
1429/**
1430 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1431 *
1432 * @dev_priv: Pointer to a device private struct.
1433 * @sw_context: The software context used for this command submission.
1434 * @header: Pointer to the command header in the command stream.
1435 *
1436 * The query bind operation will eventually associate the query ID
1437 * with its backing MOB.  In this function, we take the user mode
1438 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1439 * kernel mode equivalent.
1440 */
1441static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1442                                 struct vmw_sw_context *sw_context,
1443                                 SVGA3dCmdHeader *header)
1444{
1445        struct vmw_dx_bind_query_cmd {
1446                SVGA3dCmdHeader header;
1447                SVGA3dCmdDXBindQuery q;
1448        } *cmd;
1449
1450        struct vmw_dma_buffer *vmw_bo;
1451        int ret;
1452
1454        cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1455
1456        /*
1457         * Look up the buffer pointed to by q.mobid, put it on the relocation
1458         * list so its kernel mode MOB ID can be filled in later.
1459         */
1460        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1461                                    &vmw_bo);
1462
1463        if (ret != 0)
1464                return ret;
1465
1466        sw_context->dx_query_mob = vmw_bo;
1467        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1468
1469        vmw_dmabuf_unreference(&vmw_bo);
1470
1471        return ret;
1472}
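/*
 * Sketch of the overall bind flow (illustrative): the user-space handle in
 * q.mobid is put on the relocation list above, so vmw_apply_relocations()
 * can later overwrite it with the kernel MOB id once the buffer has been
 * validated. The dx_query_mob/dx_query_ctx pair saved here lets the rest
 * of the submission path know which MOB currently backs this context's
 * queries; the validation list still holds a reference after the local
 * one is dropped.
 */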
1473
1476/**
1477 * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
1478 *
1479 * @dev_priv: Pointer to a device private struct.
1480 * @sw_context: The software context used for this command submission.
1481 * @header: Pointer to the command header in the command stream.
1482 */
1483static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1484                                  struct vmw_sw_context *sw_context,
1485                                  SVGA3dCmdHeader *header)
1486{
1487        struct vmw_begin_gb_query_cmd {
1488                SVGA3dCmdHeader header;
1489                SVGA3dCmdBeginGBQuery q;
1490        } *cmd;
1491
1492        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1493                           header);
1494
1495        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1496                                 user_context_converter, &cmd->q.cid,
1497                                 NULL);
1498}
1499
1500/**
1501 * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
1502 *
1503 * @dev_priv: Pointer to a device private struct.
1504 * @sw_context: The software context used for this command submission.
1505 * @header: Pointer to the command header in the command stream.
1506 */
1507static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1508                               struct vmw_sw_context *sw_context,
1509                               SVGA3dCmdHeader *header)
1510{
1511        struct vmw_begin_query_cmd {
1512                SVGA3dCmdHeader header;
1513                SVGA3dCmdBeginQuery q;
1514        } *cmd;
1515
1516        cmd = container_of(header, struct vmw_begin_query_cmd,
1517                           header);
1518
1519        if (dev_priv->has_mob) {
1520                struct {
1521                        SVGA3dCmdHeader header;
1522                        SVGA3dCmdBeginGBQuery q;
1523                } gb_cmd;
1524
1525                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1526
1527                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1528                gb_cmd.header.size = cmd->header.size;
1529                gb_cmd.q.cid = cmd->q.cid;
1530                gb_cmd.q.type = cmd->q.type;
1531
1532                memcpy(cmd, &gb_cmd, sizeof(*cmd));
1533                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1534        }
1535
1536        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1537                                 user_context_converter, &cmd->q.cid,
1538                                 NULL);
1539}
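/*
 * Note on the promotion pattern above (the END_QUERY and WAIT_QUERY
 * handlers below follow the same scheme): on MOB-capable devices the
 * legacy command is rewritten in place into its guest-backed equivalent
 * and re-dispatched to the GB handler. The BUG_ON documents that both
 * encodings have identical size, so the in-place memcpy() cannot overrun
 * the submitted command buffer.
 */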
1540
1541/**
1542 * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command.
1543 *
1544 * @dev_priv: Pointer to a device private struct.
1545 * @sw_context: The software context used for this command submission.
1546 * @header: Pointer to the command header in the command stream.
1547 */
1548static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1549                                struct vmw_sw_context *sw_context,
1550                                SVGA3dCmdHeader *header)
1551{
1552        struct vmw_dma_buffer *vmw_bo;
1553        struct vmw_query_cmd {
1554                SVGA3dCmdHeader header;
1555                SVGA3dCmdEndGBQuery q;
1556        } *cmd;
1557        int ret;
1558
1559        cmd = container_of(header, struct vmw_query_cmd, header);
1560        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1561        if (unlikely(ret != 0))
1562                return ret;
1563
1564        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1565                                    &cmd->q.mobid,
1566                                    &vmw_bo);
1567        if (unlikely(ret != 0))
1568                return ret;
1569
1570        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1571
1572        vmw_dmabuf_unreference(&vmw_bo);
1573        return ret;
1574}
1575
1576/**
1577 * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
1578 *
1579 * @dev_priv: Pointer to a device private struct.
1580 * @sw_context: The software context used for this command submission.
1581 * @header: Pointer to the command header in the command stream.
1582 */
1583static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1584                             struct vmw_sw_context *sw_context,
1585                             SVGA3dCmdHeader *header)
1586{
1587        struct vmw_dma_buffer *vmw_bo;
1588        struct vmw_query_cmd {
1589                SVGA3dCmdHeader header;
1590                SVGA3dCmdEndQuery q;
1591        } *cmd;
1592        int ret;
1593
1594        cmd = container_of(header, struct vmw_query_cmd, header);
1595        if (dev_priv->has_mob) {
1596                struct {
1597                        SVGA3dCmdHeader header;
1598                        SVGA3dCmdEndGBQuery q;
1599                } gb_cmd;
1600
1601                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1602
1603                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1604                gb_cmd.header.size = cmd->header.size;
1605                gb_cmd.q.cid = cmd->q.cid;
1606                gb_cmd.q.type = cmd->q.type;
1607                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1608                gb_cmd.q.offset = cmd->q.guestResult.offset;
1609
1610                memcpy(cmd, &gb_cmd, sizeof(*cmd));
1611                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1612        }
1613
1614        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1615        if (unlikely(ret != 0))
1616                return ret;
1617
1618        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1619                                      &cmd->q.guestResult,
1620                                      &vmw_bo);
1621        if (unlikely(ret != 0))
1622                return ret;
1623
1624        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1625
1626        vmw_dmabuf_unreference(&vmw_bo);
1627        return ret;
1628}
1629
1630/**
1631 * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1632 *
1633 * @dev_priv: Pointer to a device private struct.
1634 * @sw_context: The software context used for this command submission.
1635 * @header: Pointer to the command header in the command stream.
1636 */
1637static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1638                                 struct vmw_sw_context *sw_context,
1639                                 SVGA3dCmdHeader *header)
1640{
1641        struct vmw_dma_buffer *vmw_bo;
1642        struct vmw_query_cmd {
1643                SVGA3dCmdHeader header;
1644                SVGA3dCmdWaitForGBQuery q;
1645        } *cmd;
1646        int ret;
1647
1648        cmd = container_of(header, struct vmw_query_cmd, header);
1649        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1650        if (unlikely(ret != 0))
1651                return ret;
1652
1653        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1654                                    &cmd->q.mobid,
1655                                    &vmw_bo);
1656        if (unlikely(ret != 0))
1657                return ret;
1658
1659        vmw_dmabuf_unreference(&vmw_bo);
1660        return 0;
1661}
1662
1663/**
1664 * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
1665 *
1666 * @dev_priv: Pointer to a device private struct.
1667 * @sw_context: The software context used for this command submission.
1668 * @header: Pointer to the command header in the command stream.
1669 */
1670static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1671                              struct vmw_sw_context *sw_context,
1672                              SVGA3dCmdHeader *header)
1673{
1674        struct vmw_dma_buffer *vmw_bo;
1675        struct vmw_query_cmd {
1676                SVGA3dCmdHeader header;
1677                SVGA3dCmdWaitForQuery q;
1678        } *cmd;
1679        int ret;
1680
1681        cmd = container_of(header, struct vmw_query_cmd, header);
1682        if (dev_priv->has_mob) {
1683                struct {
1684                        SVGA3dCmdHeader header;
1685                        SVGA3dCmdWaitForGBQuery q;
1686                } gb_cmd;
1687
1688                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1689
1690                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1691                gb_cmd.header.size = cmd->header.size;
1692                gb_cmd.q.cid = cmd->q.cid;
1693                gb_cmd.q.type = cmd->q.type;
1694                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1695                gb_cmd.q.offset = cmd->q.guestResult.offset;
1696
1697                memcpy(cmd, &gb_cmd, sizeof(*cmd));
1698                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1699        }
1700
1701        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1702        if (unlikely(ret != 0))
1703                return ret;
1704
1705        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1706                                      &cmd->q.guestResult,
1707                                      &vmw_bo);
1708        if (unlikely(ret != 0))
1709                return ret;
1710
1711        vmw_dmabuf_unreference(&vmw_bo);
1712        return 0;
1713}
1714
1715static int vmw_cmd_dma(struct vmw_private *dev_priv,
1716                       struct vmw_sw_context *sw_context,
1717                       SVGA3dCmdHeader *header)
1718{
1719        struct vmw_dma_buffer *vmw_bo = NULL;
1720        struct vmw_surface *srf = NULL;
1721        struct vmw_dma_cmd {
1722                SVGA3dCmdHeader header;
1723                SVGA3dCmdSurfaceDMA dma;
1724        } *cmd;
1725        int ret;
1726        SVGA3dCmdSurfaceDMASuffix *suffix;
1727        uint32_t bo_size;
1728
1729        cmd = container_of(header, struct vmw_dma_cmd, header);
1730        suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1731                                               header->size - sizeof(*suffix));
1732
1733        /* Make sure the device and the verifier stay in sync. */
1734        if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1735                DRM_ERROR("Invalid DMA suffix size.\n");
1736                return -EINVAL;
1737        }
1738
1739        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1740                                      &cmd->dma.guest.ptr,
1741                                      &vmw_bo);
1742        if (unlikely(ret != 0))
1743                return ret;
1744
1745        /* Make sure DMA doesn't cross BO boundaries. */
1746        bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1747        if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1748                DRM_ERROR("Invalid DMA offset.\n");
1749                ret = -EINVAL;
1750                goto out_no_surface;
1751        }
1751
1752        bo_size -= cmd->dma.guest.ptr.offset;
1753        if (unlikely(suffix->maximumOffset > bo_size))
1754                suffix->maximumOffset = bo_size;
1755
1756        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1757                                user_surface_converter, &cmd->dma.host.sid,
1758                                NULL);
1759        if (unlikely(ret != 0)) {
1760                if (unlikely(ret != -ERESTARTSYS))
1761                        DRM_ERROR("could not find surface for DMA.\n");
1762                goto out_no_surface;
1763        }
1764
1765        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1766
1767        vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1768                             header);
1769
1770out_no_surface:
1771        vmw_dmabuf_unreference(&vmw_bo);
1772        return ret;
1773}
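/*
 * Layout handled above (illustrative): a SurfaceDMA command body is
 * followed by a variable number of copy boxes and is terminated by a
 * suffix, so the suffix sits at the very end of the area covered by
 * header->size:
 *
 *	| header | SVGA3dCmdSurfaceDMA | copy boxes ... | suffix |
 *	         |<------------- header->size ------------------>|
 *
 *	suffix = (SVGA3dCmdSurfaceDMASuffix *)
 *		((unsigned long)&cmd->dma + header->size - sizeof(*suffix));
 */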
1774
1775static int vmw_cmd_draw(struct vmw_private *dev_priv,
1776                        struct vmw_sw_context *sw_context,
1777                        SVGA3dCmdHeader *header)
1778{
1779        struct vmw_draw_cmd {
1780                SVGA3dCmdHeader header;
1781                SVGA3dCmdDrawPrimitives body;
1782        } *cmd;
1783        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1784                (unsigned long)header + sizeof(*cmd));
1785        SVGA3dPrimitiveRange *range;
1786        uint32_t i;
1787        uint32_t maxnum;
1788        int ret;
1789
1790        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1791        if (unlikely(ret != 0))
1792                return ret;
1793
1794        cmd = container_of(header, struct vmw_draw_cmd, header);
1795        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1796
1797        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1798                DRM_ERROR("Illegal number of vertex declarations.\n");
1799                return -EINVAL;
1800        }
1801
1802        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1803                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1804                                        user_surface_converter,
1805                                        &decl->array.surfaceId, NULL);
1806                if (unlikely(ret != 0))
1807                        return ret;
1808        }
1809
1810        maxnum = (header->size - sizeof(cmd->body) -
1811                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1812        if (unlikely(cmd->body.numRanges > maxnum)) {
1813                DRM_ERROR("Illegal number of index ranges.\n");
1814                return -EINVAL;
1815        }
1816
1817        range = (SVGA3dPrimitiveRange *) decl;
1818        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1819                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1820                                        user_surface_converter,
1821                                        &range->indexArray.surfaceId, NULL);
1822                if (unlikely(ret != 0))
1823                        return ret;
1824        }
1825        return 0;
1826}
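/*
 * Worked example of the size checks above: the draw command body is
 * followed by numVertexDecls SVGA3dVertexDecl entries and then numRanges
 * SVGA3dPrimitiveRange entries, so with S = header->size:
 *
 *	max decls  = (S - sizeof(body)) / sizeof(SVGA3dVertexDecl)
 *	max ranges = (S - sizeof(body) -
 *		      numVertexDecls * sizeof(SVGA3dVertexDecl)) /
 *		     sizeof(SVGA3dPrimitiveRange)
 *
 * Both counts must lie entirely within the submitted command, or the
 * command is rejected with -EINVAL.
 */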
1827
1829static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1830                             struct vmw_sw_context *sw_context,
1831                             SVGA3dCmdHeader *header)
1832{
1833        struct vmw_tex_state_cmd {
1834                SVGA3dCmdHeader header;
1835                SVGA3dCmdSetTextureState state;
1836        } *cmd;
1837
1838        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1839          ((unsigned long) header + header->size + sizeof(*header));
1840        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1841                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1842        struct vmw_resource_val_node *ctx_node;
1843        struct vmw_resource_val_node *res_node;
1844        int ret;
1845
1846        cmd = container_of(header, struct vmw_tex_state_cmd,
1847                           header);
1848
1849        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1850                                user_context_converter, &cmd->state.cid,
1851                                &ctx_node);
1852        if (unlikely(ret != 0))
1853                return ret;
1854
1855        for (; cur_state < last_state; ++cur_state) {
1856                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1857                        continue;
1858
1859                if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1860                        DRM_ERROR("Illegal texture/sampler unit %u.\n",
1861                                  (unsigned) cur_state->stage);
1862                        return -EINVAL;
1863                }
1864
1865                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1866                                        user_surface_converter,
1867                                        &cur_state->value, &res_node);
1868                if (unlikely(ret != 0))
1869                        return ret;
1870
1871                if (dev_priv->has_mob) {
1872                        struct vmw_ctx_bindinfo_tex binding;
1873
1874                        binding.bi.ctx = ctx_node->res;
1875                        binding.bi.res = res_node ? res_node->res : NULL;
1876                        binding.bi.bt = vmw_ctx_binding_tex;
1877                        binding.texture_stage = cur_state->stage;
1878                        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1879                                        0, binding.texture_stage);
1880                }
1881        }
1882
1883        return 0;
1884}
1885
1886static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1887                                      struct vmw_sw_context *sw_context,
1888                                      void *buf)
1889{
1890        struct vmw_dma_buffer *vmw_bo;
1891        int ret;
1892
1893        struct {
1894                uint32_t header;
1895                SVGAFifoCmdDefineGMRFB body;
1896        } *cmd = buf;
1897
1898        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1899                                      &cmd->body.ptr,
1900                                      &vmw_bo);
1901        if (unlikely(ret != 0))
1902                return ret;
1903
1904        vmw_dmabuf_unreference(&vmw_bo);
1905
1906        return ret;
1907}
1908
1910/**
1911 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1912 * switching
1913 *
1914 * @dev_priv: Pointer to a device private struct.
1915 * @sw_context: The software context being used for this batch.
1916 * @val_node: The validation node representing the resource.
1917 * @buf_id: Pointer to the user-space backup buffer handle in the command
1918 * stream.
1919 * @backup_offset: Offset of backup into MOB.
1920 *
1921 * This function prepares for registering a switch of backup buffers
1922 * in the resource metadata just prior to unreserving.
1924 */
1925static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1926                                     struct vmw_sw_context *sw_context,
1927                                     struct vmw_resource_val_node *val_node,
1928                                     uint32_t *buf_id,
1929                                     unsigned long backup_offset)
1930{
1931        struct vmw_dma_buffer *dma_buf;
1932        int ret;
1933
1934        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1935        if (ret)
1936                return ret;
1937
1938        val_node->switching_backup = true;
1939        if (val_node->first_usage)
1940                val_node->no_buffer_needed = true;
1941
1942        vmw_dmabuf_unreference(&val_node->new_backup);
1943        val_node->new_backup = dma_buf;
1944        val_node->new_backup_offset = backup_offset;
1945
1946        return 0;
1947}
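/*
 * The switch recorded above takes effect later, when the resource is
 * unreserved after validation; a sketch of the consuming side, assuming
 * the unreserve interface takes the switch flag, new buffer and offset:
 *
 *	vmw_resource_unreserve(res, val_node->switching_backup,
 *			       val_node->new_backup,
 *			       val_node->new_backup_offset);
 */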
1948
1950/**
1951 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1952 *
1953 * @dev_priv: Pointer to a device private struct.
1954 * @sw_context: The software context being used for this batch.
1955 * @res_type: The resource type.
1956 * @converter: Information about user-space binding for this resource type.
1957 * @res_id: Pointer to the user-space resource handle in the command stream.
1958 * @buf_id: Pointer to the user-space backup buffer handle in the command
1959 * stream.
1960 * @backup_offset: Offset of backup into MOB.
1961 *
1962 * This function prepares for registering a switch of backup buffers
1963 * in the resource metadata just prior to unreserving. It's basically a wrapper
1964 * around vmw_cmd_res_switch_backup() with a different interface.
1965 */
1966static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1967                                 struct vmw_sw_context *sw_context,
1968                                 enum vmw_res_type res_type,
1969                                 const struct vmw_user_resource_conv
1970                                 *converter,
1971                                 uint32_t *res_id,
1972                                 uint32_t *buf_id,
1973                                 unsigned long backup_offset)
1974{
1975        struct vmw_resource_val_node *val_node;
1976        int ret;
1977
1978        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1979                                converter, res_id, &val_node);
1980        if (ret)
1981                return ret;
1982
1983        return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1984                                         buf_id, backup_offset);
1985}
1986
1987/**
1988 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1989 * command
1990 *
1991 * @dev_priv: Pointer to a device private struct.
1992 * @sw_context: The software context being used for this batch.
1993 * @header: Pointer to the command header in the command stream.
1994 */
1995static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1996                                   struct vmw_sw_context *sw_context,
1997                                   SVGA3dCmdHeader *header)
1998{
1999        struct vmw_bind_gb_surface_cmd {
2000                SVGA3dCmdHeader header;
2001                SVGA3dCmdBindGBSurface body;
2002        } *cmd;
2003
2004        cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2005
2006        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2007                                     user_surface_converter,
2008                                     &cmd->body.sid, &cmd->body.mobid,
2009                                     0);
2010}
2011
2012/**
2013 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2014 * command
2015 *
2016 * @dev_priv: Pointer to a device private struct.
2017 * @sw_context: The software context being used for this batch.
2018 * @header: Pointer to the command header in the command stream.
2019 */
2020static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2021                                   struct vmw_sw_context *sw_context,
2022                                   SVGA3dCmdHeader *header)
2023{
2024        struct vmw_gb_surface_cmd {
2025                SVGA3dCmdHeader header;
2026                SVGA3dCmdUpdateGBImage body;
2027        } *cmd;
2028
2029        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2030
2031        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2032                                 user_surface_converter,
2033                                 &cmd->body.image.sid, NULL);
2034}
2035
2036/**
2037 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2038 * command
2039 *
2040 * @dev_priv: Pointer to a device private struct.
2041 * @sw_context: The software context being used for this batch.
2042 * @header: Pointer to the command header in the command stream.
2043 */
2044static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2045                                     struct vmw_sw_context *sw_context,
2046                                     SVGA3dCmdHeader *header)
2047{
2048        struct vmw_gb_surface_cmd {
2049                SVGA3dCmdHeader header;
2050                SVGA3dCmdUpdateGBSurface body;
2051        } *cmd;
2052
2053        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2054
2055        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2056                                 user_surface_converter,
2057                                 &cmd->body.sid, NULL);
2058}
2059
2060/**
2061 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2062 * command
2063 *
2064 * @dev_priv: Pointer to a device private struct.
2065 * @sw_context: The software context being used for this batch.
2066 * @header: Pointer to the command header in the command stream.
2067 */
2068static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2069                                     struct vmw_sw_context *sw_context,
2070                                     SVGA3dCmdHeader *header)
2071{
2072        struct vmw_gb_surface_cmd {
2073                SVGA3dCmdHeader header;
2074                SVGA3dCmdReadbackGBImage body;
2075        } *cmd;
2076
2077        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2078
2079        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2080                                 user_surface_converter,
2081                                 &cmd->body.image.sid, NULL);
2082}
2083
2084/**
2085 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2086 * command
2087 *
2088 * @dev_priv: Pointer to a device private struct.
2089 * @sw_context: The software context being used for this batch.
2090 * @header: Pointer to the command header in the command stream.
2091 */
2092static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2093                                       struct vmw_sw_context *sw_context,
2094                                       SVGA3dCmdHeader *header)
2095{
2096        struct vmw_gb_surface_cmd {
2097                SVGA3dCmdHeader header;
2098                SVGA3dCmdReadbackGBSurface body;
2099        } *cmd;
2100
2101        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2102
2103        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2104                                 user_surface_converter,
2105                                 &cmd->body.sid, NULL);
2106}
2107
2108/**
2109 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2110 * command
2111 *
2112 * @dev_priv: Pointer to a device private struct.
2113 * @sw_context: The software context being used for this batch.
2114 * @header: Pointer to the command header in the command stream.
2115 */
2116static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2117                                       struct vmw_sw_context *sw_context,
2118                                       SVGA3dCmdHeader *header)
2119{
2120        struct vmw_gb_surface_cmd {
2121                SVGA3dCmdHeader header;
2122                SVGA3dCmdInvalidateGBImage body;
2123        } *cmd;
2124
2125        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2126
2127        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2128                                 user_surface_converter,
2129                                 &cmd->body.image.sid, NULL);
2130}
2131
2132/**
2133 * vmw_cmd_invalidate_gb_surface - Validate an
2134 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2135 *
2136 * @dev_priv: Pointer to a device private struct.
2137 * @sw_context: The software context being used for this batch.
2138 * @header: Pointer to the command header in the command stream.
2139 */
2140static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2141                                         struct vmw_sw_context *sw_context,
2142                                         SVGA3dCmdHeader *header)
2143{
2144        struct vmw_gb_surface_cmd {
2145                SVGA3dCmdHeader header;
2146                SVGA3dCmdInvalidateGBSurface body;
2147        } *cmd;
2148
2149        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2150
2151        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2152                                 user_surface_converter,
2153                                 &cmd->body.sid, NULL);
2154}
2155
2157/**
2158 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2159 * command
2160 *
2161 * @dev_priv: Pointer to a device private struct.
2162 * @sw_context: The software context being used for this batch.
2163 * @header: Pointer to the command header in the command stream.
2164 */
2165static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2166                                 struct vmw_sw_context *sw_context,
2167                                 SVGA3dCmdHeader *header)
2168{
2169        struct vmw_shader_define_cmd {
2170                SVGA3dCmdHeader header;
2171                SVGA3dCmdDefineShader body;
2172        } *cmd;
2173        int ret;
2174        size_t size;
2175        struct vmw_resource_val_node *val;
2176
2177        cmd = container_of(header, struct vmw_shader_define_cmd,
2178                           header);
2179
2180        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2181                                user_context_converter, &cmd->body.cid,
2182                                &val);
2183        if (unlikely(ret != 0))
2184                return ret;
2185
2186        if (unlikely(!dev_priv->has_mob))
2187                return 0;
2188
2189        size = cmd->header.size - sizeof(cmd->body);
2190        ret = vmw_compat_shader_add(dev_priv,
2191                                    vmw_context_res_man(val->res),
2192                                    cmd->body.shid, cmd + 1,
2193                                    cmd->body.type, size,
2194                                    &sw_context->staged_cmd_res);
2195        if (unlikely(ret != 0))
2196                return ret;
2197
2198        return vmw_resource_relocation_add(&sw_context->res_relocations,
2199                                           NULL,
2200                                           vmw_ptr_diff(sw_context->buf_start,
2201                                                        &cmd->header.id),
2202                                           vmw_res_rel_nop);
2203}
2204
2205/**
2206 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2207 * command
2208 *
2209 * @dev_priv: Pointer to a device private struct.
2210 * @sw_context: The software context being used for this batch.
2211 * @header: Pointer to the command header in the command stream.
2212 */
2213static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2214                                  struct vmw_sw_context *sw_context,
2215                                  SVGA3dCmdHeader *header)
2216{
2217        struct vmw_shader_destroy_cmd {
2218                SVGA3dCmdHeader header;
2219                SVGA3dCmdDestroyShader body;
2220        } *cmd;
2221        int ret;
2222        struct vmw_resource_val_node *val;
2223
2224        cmd = container_of(header, struct vmw_shader_destroy_cmd,
2225                           header);
2226
2227        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228                                user_context_converter, &cmd->body.cid,
2229                                &val);
2230        if (unlikely(ret != 0))
2231                return ret;
2232
2233        if (unlikely(!dev_priv->has_mob))
2234                return 0;
2235
2236        ret = vmw_shader_remove(vmw_context_res_man(val->res),
2237                                cmd->body.shid,
2238                                cmd->body.type,
2239                                &sw_context->staged_cmd_res);
2240        if (unlikely(ret != 0))
2241                return ret;
2242
2243        return vmw_resource_relocation_add(&sw_context->res_relocations,
2244                                           NULL,
2245                                           vmw_ptr_diff(sw_context->buf_start,
2246                                                        &cmd->header.id),
2247                                           vmw_res_rel_nop);
2248}
2249
2250/**
2251 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2252 * command
2253 *
2254 * @dev_priv: Pointer to a device private struct.
2255 * @sw_context: The software context being used for this batch.
2256 * @header: Pointer to the command header in the command stream.
2257 */
2258static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2259                              struct vmw_sw_context *sw_context,
2260                              SVGA3dCmdHeader *header)
2261{
2262        struct vmw_set_shader_cmd {
2263                SVGA3dCmdHeader header;
2264                SVGA3dCmdSetShader body;
2265        } *cmd;
2266        struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2267        struct vmw_ctx_bindinfo_shader binding;
2268        struct vmw_resource *res = NULL;
2269        int ret;
2270
2271        cmd = container_of(header, struct vmw_set_shader_cmd,
2272                           header);
2273
2274        if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2275                DRM_ERROR("Illegal shader type %u.\n",
2276                          (unsigned) cmd->body.type);
2277                return -EINVAL;
2278        }
2279
2280        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2281                                user_context_converter, &cmd->body.cid,
2282                                &ctx_node);
2283        if (unlikely(ret != 0))
2284                return ret;
2285
2286        if (!dev_priv->has_mob)
2287                return 0;
2288
2289        if (cmd->body.shid != SVGA3D_INVALID_ID) {
2290                res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2291                                        cmd->body.shid,
2292                                        cmd->body.type);
2293
2294                if (!IS_ERR(res)) {
2295                        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2296                                                    &cmd->body.shid, res,
2297                                                    &res_node);
2298                        vmw_resource_unreference(&res);
2299                        if (unlikely(ret != 0))
2300                                return ret;
2301                }
2302        }
2303
2304        if (!res_node) {
2305                ret = vmw_cmd_res_check(dev_priv, sw_context,
2306                                        vmw_res_shader,
2307                                        user_shader_converter,
2308                                        &cmd->body.shid, &res_node);
2309                if (unlikely(ret != 0))
2310                        return ret;
2311        }
2312
2313        binding.bi.ctx = ctx_node->res;
2314        binding.bi.res = res_node ? res_node->res : NULL;
2315        binding.bi.bt = vmw_ctx_binding_shader;
2316        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2317        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2318                        binding.shader_slot, 0);
2319        return 0;
2320}
2321
2322/**
2323 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2324 * command
2325 *
2326 * @dev_priv: Pointer to a device private struct.
2327 * @sw_context: The software context being used for this batch.
2328 * @header: Pointer to the command header in the command stream.
2329 */
2330static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2331                                    struct vmw_sw_context *sw_context,
2332                                    SVGA3dCmdHeader *header)
2333{
2334        struct vmw_set_shader_const_cmd {
2335                SVGA3dCmdHeader header;
2336                SVGA3dCmdSetShaderConst body;
2337        } *cmd;
2338        int ret;
2339
2340        cmd = container_of(header, struct vmw_set_shader_const_cmd,
2341                           header);
2342
2343        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2344                                user_context_converter, &cmd->body.cid,
2345                                NULL);
2346        if (unlikely(ret != 0))
2347                return ret;
2348
2349        if (dev_priv->has_mob)
2350                header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2351
2352        return 0;
2353}
2354
2355/**
2356 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2357 * command
2358 *
2359 * @dev_priv: Pointer to a device private struct.
2360 * @sw_context: The software context being used for this batch.
2361 * @header: Pointer to the command header in the command stream.
2362 */
2363static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2364                                  struct vmw_sw_context *sw_context,
2365                                  SVGA3dCmdHeader *header)
2366{
2367        struct vmw_bind_gb_shader_cmd {
2368                SVGA3dCmdHeader header;
2369                SVGA3dCmdBindGBShader body;
2370        } *cmd;
2371
2372        cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2373                           header);
2374
2375        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2376                                     user_shader_converter,
2377                                     &cmd->body.shid, &cmd->body.mobid,
2378                                     cmd->body.offsetInBytes);
2379}
2380
2381/**
2382 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2383 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2384 *
2385 * @dev_priv: Pointer to a device private struct.
2386 * @sw_context: The software context being used for this batch.
2387 * @header: Pointer to the command header in the command stream.
2388 */
2389static int
2390vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2391                                      struct vmw_sw_context *sw_context,
2392                                      SVGA3dCmdHeader *header)
2393{
2394        struct {
2395                SVGA3dCmdHeader header;
2396                SVGA3dCmdDXSetSingleConstantBuffer body;
2397        } *cmd;
2398        struct vmw_resource_val_node *res_node = NULL;
2399        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2400        struct vmw_ctx_bindinfo_cb binding;
2401        int ret;
2402
2403        if (unlikely(ctx_node == NULL)) {
2404                DRM_ERROR("DX Context not set.\n");
2405                return -EINVAL;
2406        }
2407
2408        cmd = container_of(header, typeof(*cmd), header);
2409        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2410                                user_surface_converter,
2411                                &cmd->body.sid, &res_node);
2412        if (unlikely(ret != 0))
2413                return ret;
2414
2415        binding.bi.ctx = ctx_node->res;
2416        binding.bi.res = res_node ? res_node->res : NULL;
2417        binding.bi.bt = vmw_ctx_binding_cb;
2418        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2419        binding.offset = cmd->body.offsetInBytes;
2420        binding.size = cmd->body.sizeInBytes;
2421        binding.slot = cmd->body.slot;
2422
2423        if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2424            binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2425                DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2426                          (unsigned) cmd->body.type,
2427                          (unsigned) binding.slot);
2428                return -EINVAL;
2429        }
2430
2431        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2432                        binding.shader_slot, binding.slot);
2433
2434        return 0;
2435}
2436
2437/**
2438 * vmw_cmd_dx_set_shader_res - Validate an
2439 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2440 *
2441 * @dev_priv: Pointer to a device private struct.
2442 * @sw_context: The software context being used for this batch.
2443 * @header: Pointer to the command header in the command stream.
2444 */
2445static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2446                                     struct vmw_sw_context *sw_context,
2447                                     SVGA3dCmdHeader *header)
2448{
2449        struct {
2450                SVGA3dCmdHeader header;
2451                SVGA3dCmdDXSetShaderResources body;
2452        } *cmd = container_of(header, typeof(*cmd), header);
2453        u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2454                sizeof(SVGA3dShaderResourceViewId);
2455
2456        if ((u64) cmd->body.startView + (u64) num_sr_view >
2457            (u64) SVGA3D_DX_MAX_SRVIEWS ||
2458            cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2459                DRM_ERROR("Invalid shader binding.\n");
2460                return -EINVAL;
2461        }
2462
2463        return vmw_view_bindings_add(sw_context, vmw_view_sr,
2464                                     vmw_ctx_binding_sr,
2465                                     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2466                                     (void *) &cmd[1], num_sr_view,
2467                                     cmd->body.startView);
2468}
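/*
 * Sizing arithmetic used above (illustrative): variable-length DX commands
 * carry their element count implicitly in header->size, e.g. for
 * SetShaderResources:
 *
 *	header->size = sizeof(body) + n * sizeof(SVGA3dShaderResourceViewId)
 *	=> num_sr_view = (header->size - sizeof(body)) /
 *			 sizeof(SVGA3dShaderResourceViewId)
 *
 * The u64 casts in the range check guard against wraparound when adding
 * startView and num_sr_view before comparing against the device limit.
 */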
2469
2470/**
2471 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2472 * command
2473 *
2474 * @dev_priv: Pointer to a device private struct.
2475 * @sw_context: The software context being used for this batch.
2476 * @header: Pointer to the command header in the command stream.
2477 */
2478static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2479                                 struct vmw_sw_context *sw_context,
2480                                 SVGA3dCmdHeader *header)
2481{
2482        struct {
2483                SVGA3dCmdHeader header;
2484                SVGA3dCmdDXSetShader body;
2485        } *cmd;
2486        struct vmw_resource *res = NULL;
2487        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2488        struct vmw_ctx_bindinfo_shader binding;
2489        int ret = 0;
2490
2491        if (unlikely(ctx_node == NULL)) {
2492                DRM_ERROR("DX Context not set.\n");
2493                return -EINVAL;
2494        }
2495
2496        cmd = container_of(header, typeof(*cmd), header);
2497
2498        if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2499                DRM_ERROR("Illegal shader type %u.\n",
2500                          (unsigned) cmd->body.type);
2501                return -EINVAL;
2502        }
2503
2504        if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2505                res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2506                if (IS_ERR(res)) {
2507                        DRM_ERROR("Could not find shader for binding.\n");
2508                        return PTR_ERR(res);
2509                }
2510
2511                ret = vmw_resource_val_add(sw_context, res, NULL);
2512                if (ret)
2513                        goto out_unref;
2514        }
2515
2516        binding.bi.ctx = ctx_node->res;
2517        binding.bi.res = res;
2518        binding.bi.bt = vmw_ctx_binding_dx_shader;
2519        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2520
2521        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2522                        binding.shader_slot, 0);
2523out_unref:
2524        if (res)
2525                vmw_resource_unreference(&res);
2526
2527        return ret;
2528}
2529
2530/**
2531 * vmw_cmd_dx_set_vertex_buffers - Validate an
2532 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2533 *
2534 * @dev_priv: Pointer to a device private struct.
2535 * @sw_context: The software context being used for this batch.
2536 * @header: Pointer to the command header in the command stream.
2537 */
2538static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2539                                         struct vmw_sw_context *sw_context,
2540                                         SVGA3dCmdHeader *header)
2541{
2542        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2543        struct vmw_ctx_bindinfo_vb binding;
2544        struct vmw_resource_val_node *res_node;
2545        struct {
2546                SVGA3dCmdHeader header;
2547                SVGA3dCmdDXSetVertexBuffers body;
2548                SVGA3dVertexBuffer buf[];
2549        } *cmd;
2550        int i, ret, num;
2551
2552        if (unlikely(ctx_node == NULL)) {
2553                DRM_ERROR("DX Context not set.\n");
2554                return -EINVAL;
2555        }
2556
2557        cmd = container_of(header, typeof(*cmd), header);
2558        num = (cmd->header.size - sizeof(cmd->body)) /
2559                sizeof(SVGA3dVertexBuffer);
2560        if ((u64)num + (u64)cmd->body.startBuffer >
2561            (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2562                DRM_ERROR("Invalid number of vertex buffers.\n");
2563                return -EINVAL;
2564        }
2565
2566        for (i = 0; i < num; i++) {
2567                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2568                                        user_surface_converter,
2569                                        &cmd->buf[i].sid, &res_node);
2570                if (unlikely(ret != 0))
2571                        return ret;
2572
2573                binding.bi.ctx = ctx_node->res;
2574                binding.bi.bt = vmw_ctx_binding_vb;
2575                binding.bi.res = ((res_node) ? res_node->res : NULL);
2576                binding.offset = cmd->buf[i].offset;
2577                binding.stride = cmd->buf[i].stride;
2578                binding.slot = i + cmd->body.startBuffer;
2579
2580                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2581                                0, binding.slot);
2582        }
2583
2584        return 0;
2585}
2586
2587/**
2588 * vmw_cmd_dx_set_index_buffer - Validate an
2589 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2590 *
2591 * @dev_priv: Pointer to a device private struct.
2592 * @sw_context: The software context being used for this batch.
2593 * @header: Pointer to the command header in the command stream.
2594 */
2595static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2596                                       struct vmw_sw_context *sw_context,
2597                                       SVGA3dCmdHeader *header)
2598{
2599        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2600        struct vmw_ctx_bindinfo_ib binding;
2601        struct vmw_resource_val_node *res_node;
2602        struct {
2603                SVGA3dCmdHeader header;
2604                SVGA3dCmdDXSetIndexBuffer body;
2605        } *cmd;
2606        int ret;
2607
2608        if (unlikely(ctx_node == NULL)) {
2609                DRM_ERROR("DX Context not set.\n");
2610                return -EINVAL;
2611        }
2612
2613        cmd = container_of(header, typeof(*cmd), header);
2614        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2615                                user_surface_converter,
2616                                &cmd->body.sid, &res_node);
2617        if (unlikely(ret != 0))
2618                return ret;
2619
2620        binding.bi.ctx = ctx_node->res;
2621        binding.bi.res = ((res_node) ? res_node->res : NULL);
2622        binding.bi.bt = vmw_ctx_binding_ib;
2623        binding.offset = cmd->body.offset;
2624        binding.format = cmd->body.format;
2625
2626        vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2627
2628        return 0;
2629}
2630
2631/**
2632 * vmw_cmd_dx_set_rendertargets - Validate an
2633 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2634 *
2635 * @dev_priv: Pointer to a device private struct.
2636 * @sw_context: The software context being used for this batch.
2637 * @header: Pointer to the command header in the command stream.
2638 */
2639static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2640                                        struct vmw_sw_context *sw_context,
2641                                        SVGA3dCmdHeader *header)
2642{
2643        struct {
2644                SVGA3dCmdHeader header;
2645                SVGA3dCmdDXSetRenderTargets body;
2646        } *cmd = container_of(header, typeof(*cmd), header);
2647        int ret;
2648        u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2649                sizeof(SVGA3dRenderTargetViewId);
2650
2651        if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2652                DRM_ERROR("Invalid DX Rendertarget binding.\n");
2653                return -EINVAL;
2654        }
2655
2656        ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2657                                    vmw_ctx_binding_ds, 0,
2658                                    &cmd->body.depthStencilViewId, 1, 0);
2659        if (ret)
2660                return ret;
2661
2662        return vmw_view_bindings_add(sw_context, vmw_view_rt,
2663                                     vmw_ctx_binding_dx_rt, 0,
2664                                     (void *)&cmd[1], num_rt_view, 0);
2665}
2666
2667/**
2668 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2669 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2670 *
2671 * @dev_priv: Pointer to a device private struct.
2672 * @sw_context: The software context being used for this batch.
2673 * @header: Pointer to the command header in the command stream.
2674 */
2675static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2676                                              struct vmw_sw_context *sw_context,
2677                                              SVGA3dCmdHeader *header)
2678{
2679        struct {
2680                SVGA3dCmdHeader header;
2681                SVGA3dCmdDXClearRenderTargetView body;
2682        } *cmd = container_of(header, typeof(*cmd), header);
2683
2684        return vmw_view_id_val_add(sw_context, vmw_view_rt,
2685                                   cmd->body.renderTargetViewId);
2686}
2687
2688/**
2689 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2690 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2691 *
2692 * @dev_priv: Pointer to a device private struct.
2693 * @sw_context: The software context being used for this batch.
2694 * @header: Pointer to the command header in the command stream.
2695 */
2696static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2697                                              struct vmw_sw_context *sw_context,
2698                                              SVGA3dCmdHeader *header)
2699{
2700        struct {
2701                SVGA3dCmdHeader header;
2702                SVGA3dCmdDXClearDepthStencilView body;
2703        } *cmd = container_of(header, typeof(*cmd), header);
2704
2705        return vmw_view_id_val_add(sw_context, vmw_view_ds,
2706                                   cmd->body.depthStencilViewId);
2707}
2708
2709static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2710                                  struct vmw_sw_context *sw_context,
2711                                  SVGA3dCmdHeader *header)
2712{
2713        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2714        struct vmw_resource_val_node *srf_node;
2715        struct vmw_resource *res;
2716        enum vmw_view_type view_type;
2717        int ret;
2718        /*
2719         * This is based on the fact that all affected define commands have
2720         * the same initial command body layout.
2721         */
2722        struct {
2723                SVGA3dCmdHeader header;
2724                uint32 defined_id;
2725                uint32 sid;
2726        } *cmd;
2727
2728        if (unlikely(ctx_node == NULL)) {
2729                DRM_ERROR("DX Context not set.\n");
2730                return -EINVAL;
2731        }
2732
2733        view_type = vmw_view_cmd_to_type(header->id);
2734        cmd = container_of(header, typeof(*cmd), header);
2735        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2736                                user_surface_converter,
2737                                &cmd->sid, &srf_node);
2738        if (unlikely(ret != 0))
2739                return ret;
2740
2741        res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2742        ret = vmw_cotable_notify(res, cmd->defined_id);
2743        vmw_resource_unreference(&res);
2744        if (unlikely(ret != 0))
2745                return ret;
2746
2747        return vmw_view_add(sw_context->man,
2748                            ctx_node->res,
2749                            srf_node->res,
2750                            view_type,
2751                            cmd->defined_id,
2752                            header,
2753                            header->size + sizeof(*header),
2754                            &sw_context->staged_cmd_res);
2755}
2756
2757/**
2758 * vmw_cmd_dx_set_so_targets - Validate an
2759 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2760 *
2761 * @dev_priv: Pointer to a device private struct.
2762 * @sw_context: The software context being used for this batch.
2763 * @header: Pointer to the command header in the command stream.
2764 */
2765static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2766                                     struct vmw_sw_context *sw_context,
2767                                     SVGA3dCmdHeader *header)
2768{
2769        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2770        struct vmw_ctx_bindinfo_so binding;
2771        struct vmw_resource_val_node *res_node;
2772        struct {
2773                SVGA3dCmdHeader header;
2774                SVGA3dCmdDXSetSOTargets body;
2775                SVGA3dSoTarget targets[];
2776        } *cmd;
2777        int i, ret, num;
2778
2779        if (unlikely(ctx_node == NULL)) {
2780                DRM_ERROR("DX Context not set.\n");
2781                return -EINVAL;
2782        }
2783
2784        cmd = container_of(header, typeof(*cmd), header);
2785        num = (cmd->header.size - sizeof(cmd->body)) /
2786                sizeof(SVGA3dSoTarget);
2787
2788        if (num > SVGA3D_DX_MAX_SOTARGETS) {
2789                DRM_ERROR("Invalid DX SO binding.\n");
2790                return -EINVAL;
2791        }
2792
2793        for (i = 0; i < num; i++) {
2794                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2795                                        user_surface_converter,
2796                                        &cmd->targets[i].sid, &res_node);
2797                if (unlikely(ret != 0))
2798                        return ret;
2799
2800                binding.bi.ctx = ctx_node->res;
2801                binding.bi.res = ((res_node) ? res_node->res : NULL);
2802                binding.bi.bt = vmw_ctx_binding_so;
2803                binding.offset = cmd->targets[i].offset;
2804                binding.size = cmd->targets[i].sizeInBytes;
2805                binding.slot = i;
2806
2807                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2808                                0, binding.slot);
2809        }
2810
2811        return 0;
2812}
2813
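    /**
     * vmw_cmd_dx_so_define - Validate a command defining a DX state
     * object, such as an element layout, blend, depthstencil, rasterizer
     * or sampler state, or a streamoutput.
     *
     * @dev_priv: Pointer to a device private struct.
     * @sw_context: The software context being used for this batch.
     * @header: Pointer to the command header in the command stream.
     */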
2814static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2815                                struct vmw_sw_context *sw_context,
2816                                SVGA3dCmdHeader *header)
2817{
2818        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2819        struct vmw_resource *res;
2820        /*
2821         * This is based on the fact that all affected define commands have
2822         * the same initial command body layout.
2823         */
2824        struct {
2825                SVGA3dCmdHeader header;
2826                uint32 defined_id;
2827        } *cmd;
2828        enum vmw_so_type so_type;
2829        int ret;
2830
2831        if (unlikely(ctx_node == NULL)) {
2832                DRM_ERROR("DX Context not set.\n");
2833                return -EINVAL;
2834        }
2835
2836        so_type = vmw_so_cmd_to_type(header->id);
2837        res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2838        cmd = container_of(header, typeof(*cmd), header);
2839        ret = vmw_cotable_notify(res, cmd->defined_id);
2840        vmw_resource_unreference(&res);
2841
2842        return ret;
2843}
2844
2845/**
2846 * vmw_cmd_dx_check_subresource - Validate an
2847 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2848 *
2849 * @dev_priv: Pointer to a device private struct.
2850 * @sw_context: The software context being used for this batch.
2851 * @header: Pointer to the command header in the command stream.
2852 */
2853static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2854                                        struct vmw_sw_context *sw_context,
2855                                        SVGA3dCmdHeader *header)
2856{
2857        struct {
2858                SVGA3dCmdHeader header;
2859                union {
2860                        SVGA3dCmdDXReadbackSubResource r_body;
2861                        SVGA3dCmdDXInvalidateSubResource i_body;
2862                        SVGA3dCmdDXUpdateSubResource u_body;
2863                        SVGA3dSurfaceId sid;
2864                };
2865        } *cmd;
2866
2867        BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2868                     offsetof(typeof(*cmd), sid));
2869        BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2870                     offsetof(typeof(*cmd), sid));
2871        BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2872                     offsetof(typeof(*cmd), sid));
2873
2874        cmd = container_of(header, typeof(*cmd), header);
2875
2876        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2877                                 user_surface_converter,
2878                                 &cmd->sid, NULL);
2879}
2880
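    /**
     * vmw_cmd_dx_cid_check - Validate a command that requires only that
     * the current DX context is set.
     *
     * @dev_priv: Pointer to a device private struct.
     * @sw_context: The software context being used for this batch.
     * @header: Pointer to the command header in the command stream.
     */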
2881static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2882                                struct vmw_sw_context *sw_context,
2883                                SVGA3dCmdHeader *header)
2884{
2885        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2886
2887        if (unlikely(ctx_node == NULL)) {
2888                DRM_ERROR("DX Context not set.\n");
2889                return -EINVAL;
2890        }
2891
2892        return 0;
2893}
2894
2895/**
2896 * vmw_cmd_dx_view_remove - validate a view remove command and
2897 * schedule the view resource for removal.
2898 *
2899 * @dev_priv: Pointer to a device private struct.
2900 * @sw_context: The software context being used for this batch.
2901 * @header: Pointer to the command header in the command stream.
2902 *
2903 * Check that the view exists, and if it was not created using this
2904 * command batch, conditionally make this command a NOP.
2905 */
2906static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2907                                  struct vmw_sw_context *sw_context,
2908                                  SVGA3dCmdHeader *header)
2909{
2910        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2911        struct {
2912                SVGA3dCmdHeader header;
2913                union vmw_view_destroy body;
2914        } *cmd = container_of(header, typeof(*cmd), header);
2915        enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2916        struct vmw_resource *view;
2917        int ret;
2918
2919        if (!ctx_node) {
2920                DRM_ERROR("DX Context not set.\n");
2921                return -EINVAL;
2922        }
2923
2924        ret = vmw_view_remove(sw_context->man,
2925                              cmd->body.view_id, view_type,
2926                              &sw_context->staged_cmd_res,
2927                              &view);
2928        if (ret || !view)
2929                return ret;
2930
2931        /*
2932         * If the view wasn't created during this command batch, it might
2933         * have been removed due to a context swapout, so add a
2934         * relocation to conditionally make this command a NOP to avoid
2935         * device errors.
2936         */
2937        return vmw_resource_relocation_add(&sw_context->res_relocations,
2938                                           view,
2939                                           vmw_ptr_diff(sw_context->buf_start,
2940                                                        &cmd->header.id),
2941                                           vmw_res_rel_cond_nop);
2942}
2943
2944/**
2945 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2946 * command
2947 *
2948 * @dev_priv: Pointer to a device private struct.
2949 * @sw_context: The software context being used for this batch.
2950 * @header: Pointer to the command header in the command stream.
2951 */
2952static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2953                                    struct vmw_sw_context *sw_context,
2954                                    SVGA3dCmdHeader *header)
2955{
2956        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2957        struct vmw_resource *res;
2958        struct {
2959                SVGA3dCmdHeader header;
2960                SVGA3dCmdDXDefineShader body;
2961        } *cmd = container_of(header, typeof(*cmd), header);
2962        int ret;
2963
2964        if (!ctx_node) {
2965                DRM_ERROR("DX Context not set.\n");
2966                return -EINVAL;
2967        }
2968
2969        res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2970        ret = vmw_cotable_notify(res, cmd->body.shaderId);
2971        vmw_resource_unreference(&res);
2972        if (ret)
2973                return ret;
2974
2975        return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2976                                 cmd->body.shaderId, cmd->body.type,
2977                                 &sw_context->staged_cmd_res);
2978}
2979
2980/**
2981 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2982 * command
2983 *
2984 * @dev_priv: Pointer to a device private struct.
2985 * @sw_context: The software context being used for this batch.
2986 * @header: Pointer to the command header in the command stream.
2987 */
2988static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2989                                     struct vmw_sw_context *sw_context,
2990                                     SVGA3dCmdHeader *header)
2991{
2992        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2993        struct {
2994                SVGA3dCmdHeader header;
2995                SVGA3dCmdDXDestroyShader body;
2996        } *cmd = container_of(header, typeof(*cmd), header);
2997        int ret;
2998
2999        if (!ctx_node) {
3000                DRM_ERROR("DX Context not set.\n");
3001                return -EINVAL;
3002        }
3003
3004        ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3005                                &sw_context->staged_cmd_res);
3006        if (ret)
3007                DRM_ERROR("Could not find shader to remove.\n");
3008
3009        return ret;
3010}
3011
3012/**
3013 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3014 * command
3015 *
3016 * @dev_priv: Pointer to a device private struct.
3017 * @sw_context: The software context being used for this batch.
3018 * @header: Pointer to the command header in the command stream.
3019 */
3020static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3021                                  struct vmw_sw_context *sw_context,
3022                                  SVGA3dCmdHeader *header)
3023{
3024        struct vmw_resource_val_node *ctx_node;
3025        struct vmw_resource_val_node *res_node;
3026        struct vmw_resource *res;
3027        struct {
3028                SVGA3dCmdHeader header;
3029                SVGA3dCmdDXBindShader body;
3030        } *cmd = container_of(header, typeof(*cmd), header);
3031        int ret;
3032
3033        if (cmd->body.cid != SVGA3D_INVALID_ID) {
3034                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3035                                        user_context_converter,
3036                                        &cmd->body.cid, &ctx_node);
3037                if (ret)
3038                        return ret;
3039        } else {
3040                ctx_node = sw_context->dx_ctx_node;
3041                if (!ctx_node) {
3042                        DRM_ERROR("DX Context not set.\n");
3043                        return -EINVAL;
3044                }
3045        }
3046
3047        res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3048                                cmd->body.shid, 0);
3049        if (IS_ERR(res)) {
3050                DRM_ERROR("Could not find shader to bind.\n");
3051                return PTR_ERR(res);
3052        }
3053
3054        ret = vmw_resource_val_add(sw_context, res, &res_node);
3055        if (ret) {
3056                DRM_ERROR("Error creating resource validation node.\n");
3057                goto out_unref;
3058        }
3059
3061        ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3062                                        &cmd->body.mobid,
3063                                        cmd->body.offsetInBytes);
3064out_unref:
3065        vmw_resource_unreference(&res);
3066
3067        return ret;
3068}
3069
3070/**
3071 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3072 *
3073 * @dev_priv: Pointer to a device private struct.
3074 * @sw_context: The software context being used for this batch.
3075 * @header: Pointer to the command header in the command stream.
3076 */
3077static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3078                              struct vmw_sw_context *sw_context,
3079                              SVGA3dCmdHeader *header)
3080{
3081        struct {
3082                SVGA3dCmdHeader header;
3083                SVGA3dCmdDXGenMips body;
3084        } *cmd = container_of(header, typeof(*cmd), header);
3085
3086        return vmw_view_id_val_add(sw_context, vmw_view_sr,
3087                                   cmd->body.shaderResourceViewId);
3088}
3089
3090/**
3091 * vmw_cmd_dx_transfer_from_buffer - Validate an
3092 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3093 *
3094 * @dev_priv: Pointer to a device private struct.
3095 * @sw_context: The software context being used for this batch.
3096 * @header: Pointer to the command header in the command stream.
3097 */
3098static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3099                                           struct vmw_sw_context *sw_context,
3100                                           SVGA3dCmdHeader *header)
3101{
3102        struct {
3103                SVGA3dCmdHeader header;
3104                SVGA3dCmdDXTransferFromBuffer body;
3105        } *cmd = container_of(header, typeof(*cmd), header);
3106        int ret;
3107
3108        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3109                                user_surface_converter,
3110                                &cmd->body.srcSid, NULL);
3111        if (ret != 0)
3112                return ret;
3113
3114        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3115                                 user_surface_converter,
3116                                 &cmd->body.destSid, NULL);
3117}
3118
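    /**
     * vmw_cmd_check_not_3d - Validate a non-3D fifo command and compute
     * its size.
     *
     * @dev_priv: Pointer to a device private struct.
     * @sw_context: The software context being used for this batch.
     * @buf: Pointer to the command in the command stream.
     * @size: On input, the number of bytes remaining in the command
     * stream; on output, the size of the validated command.
     */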
3119static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3120                                struct vmw_sw_context *sw_context,
3121                                void *buf, uint32_t *size)
3122{
3123        uint32_t size_remaining = *size;
3124        uint32_t cmd_id;
3125
3126        cmd_id = ((uint32_t *)buf)[0];
3127        switch (cmd_id) {
3128        case SVGA_CMD_UPDATE:
3129                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3130                break;
3131        case SVGA_CMD_DEFINE_GMRFB:
3132                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3133                break;
3134        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3135                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3136                break;
3137        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3138                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3139                break;
3140        default:
3141                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3142                return -EINVAL;
3143        }
3144
3145        if (*size > size_remaining) {
3146                DRM_ERROR("Invalid SVGA command (size mismatch):"
3147                          " %u.\n", cmd_id);
3148                return -EINVAL;
3149        }
3150
3151        if (unlikely(!sw_context->kernel)) {
3152                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3153                return -EPERM;
3154        }
3155
3156        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3157                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3158
3159        return 0;
3160}
3161
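    /*
     * Dispatch table for SVGA3D commands. The boolean arguments to each
     * VMW_CMD_DEF entry correspond to the user_allow, gb_disable and
     * gb_enable flags tested in vmw_cmd_check() below.
     */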
3162static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3163        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3164                    false, false, false),
3165        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3166                    false, false, false),
3167        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3168                    true, false, false),
3169        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3170                    true, false, false),
3171        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3172                    true, false, false),
3173        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3174                    false, false, false),
3175        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3176                    false, false, false),
3177        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3178                    true, false, false),
3179        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3180                    true, false, false),
3181        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3182                    true, false, false),
3183        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3184                    &vmw_cmd_set_render_target_check, true, false, false),
3185        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3186                    true, false, false),
3187        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3188                    true, false, false),
3189        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3190                    true, false, false),
3191        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3192                    true, false, false),
3193        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3194                    true, false, false),
3195        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3196                    true, false, false),
3197        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3198                    true, false, false),
3199        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3200                    false, false, false),
3201        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3202                    true, false, false),
3203        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3204                    true, false, false),
3205        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3206                    true, false, false),
3207        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3208                    true, false, false),
3209        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3210                    true, false, false),
3211        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3212                    true, false, false),
3213        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3214                    true, false, false),
3215        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3216                    true, false, false),
3217        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3218                    true, false, false),
3219        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3220                    true, false, false),
3221        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3222                    &vmw_cmd_blt_surf_screen_check, false, false, false),
3223        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3224                    false, false, false),
3225        VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3226                    false, false, false),
3227        VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3228                    false, false, false),
3229        VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3230                    false, false, false),
3231        VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3232                    false, false, false),
3233        VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3234                    false, false, false),
3235        VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3236                    false, false, false),
3237        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3238                    false, false, false),
3239        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3240                    false, false, false),
3241        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3242                    false, false, false),
3243        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3244                    false, false, false),
3245        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3246                    false, false, false),
3247        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3248                    false, false, false),
3249        VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3250                    false, false, true),
3251        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3252                    false, false, true),
3253        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3254                    false, false, true),
3255        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3256                    false, false, true),
3257        VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3258                    false, false, true),
3259        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3260                    false, false, true),
3261        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3262                    false, false, true),
3263        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3264                    false, false, true),
3265        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3266                    true, false, true),
3267        VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3268                    false, false, true),
3269        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3270                    true, false, true),
3271        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3272                    &vmw_cmd_update_gb_surface, true, false, true),
3273        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3274                    &vmw_cmd_readback_gb_image, true, false, true),
3275        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3276                    &vmw_cmd_readback_gb_surface, true, false, true),
3277        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3278                    &vmw_cmd_invalidate_gb_image, true, false, true),
3279        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3280                    &vmw_cmd_invalidate_gb_surface, true, false, true),
3281        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3282                    false, false, true),
3283        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3284                    false, false, true),
3285        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3286                    false, false, true),
3287        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3288                    false, false, true),
3289        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3290                    false, false, true),
3291        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3292                    false, false, true),
3293        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3294                    true, false, true),
3295        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3296                    false, false, true),
3297        VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3298                    false, false, false),
3299        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3300                    true, false, true),
3301        VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3302                    true, false, true),
3303        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3304                    true, false, true),
3305        VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3306                    true, false, true),
3307        VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3308                    true, false, true),
3309        VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3310                    false, false, true),
3311        VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3312                    false, false, true),
3313        VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3314                    false, false, true),
3315        VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3316                    false, false, true),
3317        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3318                    false, false, true),
3319        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3320                    false, false, true),
3321        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3322                    false, false, true),
3323        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3324                    false, false, true),
3325        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3326                    false, false, true),
3327        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3328                    false, false, true),
3329        VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3330                    true, false, true),
3331        VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3332                    false, false, true),
3333        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3334                    false, false, true),
3335        VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3336                    false, false, true),
3337        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3338                    false, false, true),
3339
3340        /*
3341         * DX commands
3342         */
3343        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3344                    false, false, true),
3345        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3346                    false, false, true),
3347        VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3348                    false, false, true),
3349        VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3350                    false, false, true),
3351        VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3352                    false, false, true),
3353        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3354                    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3355        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3356                    &vmw_cmd_dx_set_shader_res, true, false, true),
3357        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3358                    true, false, true),
3359        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3360                    true, false, true),
3361        VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3362                    true, false, true),
3363        VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3364                    true, false, true),
3365        VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3366                    true, false, true),
3367        VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3368                    &vmw_cmd_dx_cid_check, true, false, true),
3369        VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3370                    true, false, true),
3371        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3372                    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3373        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3374                    &vmw_cmd_dx_set_index_buffer, true, false, true),
3375        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3376                    &vmw_cmd_dx_set_rendertargets, true, false, true),
3377        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3378                    true, false, true),
3379        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3380                    &vmw_cmd_dx_cid_check, true, false, true),
3381        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3382                    &vmw_cmd_dx_cid_check, true, false, true),
3383        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3384                    true, false, true),
3385        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3386                    true, false, true),
3387        VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3388                    true, false, true),
3389        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3390                    &vmw_cmd_dx_cid_check, true, false, true),
3391        VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3392                    true, false, true),
3393        VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3394                    true, false, true),
3395        VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3396                    true, false, true),
3397        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3398                    true, false, true),
3399        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3400                    true, false, true),
3401        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3402                    true, false, true),
3403        VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3404                    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3405        VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3406                    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3407        VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3408                    true, false, true),
3409        VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3410                    true, false, true),
3411        VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3412                    &vmw_cmd_dx_check_subresource, true, false, true),
3413        VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3414                    &vmw_cmd_dx_check_subresource, true, false, true),
3415        VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3416                    &vmw_cmd_dx_check_subresource, true, false, true),
3417        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3418                    &vmw_cmd_dx_view_define, true, false, true),
3419        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3420                    &vmw_cmd_dx_view_remove, true, false, true),
3421        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3422                    &vmw_cmd_dx_view_define, true, false, true),
3423        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3424                    &vmw_cmd_dx_view_remove, true, false, true),
3425        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3426                    &vmw_cmd_dx_view_define, true, false, true),
3427        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3428                    &vmw_cmd_dx_view_remove, true, false, true),
3429        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3430                    &vmw_cmd_dx_so_define, true, false, true),
3431        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3432                    &vmw_cmd_dx_cid_check, true, false, true),
3433        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3434                    &vmw_cmd_dx_so_define, true, false, true),
3435        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3436                    &vmw_cmd_dx_cid_check, true, false, true),
3437        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3438                    &vmw_cmd_dx_so_define, true, false, true),
3439        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3440                    &vmw_cmd_dx_cid_check, true, false, true),
3441        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3442                    &vmw_cmd_dx_so_define, true, false, true),
3443        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3444                    &vmw_cmd_dx_cid_check, true, false, true),
3445        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3446                    &vmw_cmd_dx_so_define, true, false, true),
3447        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3448                    &vmw_cmd_dx_cid_check, true, false, true),
3449        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3450                    &vmw_cmd_dx_define_shader, true, false, true),
3451        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3452                    &vmw_cmd_dx_destroy_shader, true, false, true),
3453        VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3454                    &vmw_cmd_dx_bind_shader, true, false, true),
3455        VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3456                    &vmw_cmd_dx_so_define, true, false, true),
3457        VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3458                    &vmw_cmd_dx_cid_check, true, false, true),
3459        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3460                    true, false, true),
3461        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3462                    &vmw_cmd_dx_set_so_targets, true, false, true),
3463        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3464                    &vmw_cmd_dx_cid_check, true, false, true),
3465        VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3466                    &vmw_cmd_dx_cid_check, true, false, true),
3467        VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3468                    &vmw_cmd_buffer_copy_check, true, false, true),
3469        VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3470                    &vmw_cmd_pred_copy_check, true, false, true),
3471        VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3472                    &vmw_cmd_dx_transfer_from_buffer,
3473                    true, false, true),
3474};
3475
3476bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3477{
3478        u32 cmd_id = ((u32 *) buf)[0];
3479
3480        if (cmd_id >= SVGA_CMD_MAX) {
3481                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3482                const struct vmw_cmd_entry *entry;
3483
3484                *size = header->size + sizeof(SVGA3dCmdHeader);
3485                cmd_id = header->id;
3486                if (cmd_id >= SVGA_3D_CMD_MAX)
3487                        return false;
3488
3489                cmd_id -= SVGA_3D_CMD_BASE;
3490                entry = &vmw_cmd_entries[cmd_id];
3491                *cmd = entry->cmd_name;
3492                return true;
3493        }
3494
3495        switch (cmd_id) {
3496        case SVGA_CMD_UPDATE:
3497                *cmd = "SVGA_CMD_UPDATE";
3498                *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3499                break;
3500        case SVGA_CMD_DEFINE_GMRFB:
3501                *cmd = "SVGA_CMD_DEFINE_GMRFB";
3502                *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3503                break;
3504        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3505                *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3506                *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3507                break;
3508        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3509                *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3510                *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3511                break;
3512        default:
3513                *cmd = "UNKNOWN";
3514                *size = 0;
3515                return false;
3516        }
3517
3518        return true;
3519}
3520
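    /**
     * vmw_cmd_check - Validate a single command in the command stream and
     * dispatch it to its per-command validation function.
     *
     * @dev_priv: Pointer to a device private struct.
     * @sw_context: The software context being used for this batch.
     * @buf: Pointer to the command in the command stream.
     * @size: On input, the number of bytes remaining in the command
     * stream; on output, the size of the validated command including
     * its header.
     */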
3521static int vmw_cmd_check(struct vmw_private *dev_priv,
3522                         struct vmw_sw_context *sw_context,
3523                         void *buf, uint32_t *size)
3524{
3525        uint32_t cmd_id;
3526        uint32_t size_remaining = *size;
3527        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3528        int ret;
3529        const struct vmw_cmd_entry *entry;
3530        bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3531
3532        cmd_id = ((uint32_t *)buf)[0];
3533        /* Handle any non-3D commands */
3534        if (unlikely(cmd_id < SVGA_CMD_MAX))
3535                return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3536
3538        cmd_id = header->id;
3539        *size = header->size + sizeof(SVGA3dCmdHeader);
3540
3541        cmd_id -= SVGA_3D_CMD_BASE;
3542        if (unlikely(*size > size_remaining))
3543                goto out_invalid;
3544
3545        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3546                goto out_invalid;
3547
3548        entry = &vmw_cmd_entries[cmd_id];
3549        if (unlikely(!entry->func))
3550                goto out_invalid;
3551
3552        if (unlikely(!entry->user_allow && !sw_context->kernel))
3553                goto out_privileged;
3554
3555        if (unlikely(entry->gb_disable && gb))
3556                goto out_old;
3557
3558        if (unlikely(entry->gb_enable && !gb))
3559                goto out_new;
3560
3561        ret = entry->func(dev_priv, sw_context, header);
3562        if (unlikely(ret != 0))
3563                goto out_invalid;
3564
3565        return 0;
3566out_invalid:
3567        DRM_ERROR("Invalid SVGA3D command: %d\n",
3568                  cmd_id + SVGA_3D_CMD_BASE);
3569        return -EINVAL;
3570out_privileged:
3571        DRM_ERROR("Privileged SVGA3D command: %d\n",
3572                  cmd_id + SVGA_3D_CMD_BASE);
3573        return -EPERM;
3574out_old:
3575        DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3576                  cmd_id + SVGA_3D_CMD_BASE);
3577        return -EINVAL;
3578out_new:
3579        DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3580                  cmd_id + SVGA_3D_CMD_BASE);
3581        return -EINVAL;
3582}
3583
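    /**
     * vmw_cmd_check_all - Validate the entire command stream by calling
     * vmw_cmd_check() on each command and advancing past it.
     *
     * @dev_priv: Pointer to a device private struct.
     * @sw_context: The software context being used for this batch.
     * @buf: Pointer to the start of the command stream.
     * @size: Size of the command stream in bytes.
     */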
3584static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3585                             struct vmw_sw_context *sw_context,
3586                             void *buf,
3587                             uint32_t size)
3588{
3589        int32_t cur_size = size;
3590        int ret;
3591
3592        sw_context->buf_start = buf;
3593
3594        while (cur_size > 0) {
3595                size = cur_size;
3596                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3597                if (unlikely(ret != 0))
3598                        return ret;
3599                buf = (void *)((unsigned long) buf + size);
3600                cur_size -= size;
3601        }
3602
3603        if (unlikely(cur_size != 0)) {
3604                DRM_ERROR("Command verifier out of sync.\n");
3605                return -EINVAL;
3606        }
3607
3608        return 0;
3609}
3610
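    /*
     * Relocations are kept in a preallocated array in the software
     * context, so freeing them amounts to resetting the counter.
     */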
3611static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3612{
3613        sw_context->cur_reloc = 0;
3614}
3615
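    /*
     * Patch the final buffer object placements into the command stream:
     * VRAM placements become an offset into the framebuffer GMR, while
     * GMR and MOB placements are patched with the id of the backing
     * memory region.
     */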
3616static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3617{
3618        uint32_t i;
3619        struct vmw_relocation *reloc;
3620        struct ttm_validate_buffer *validate;
3621        struct ttm_buffer_object *bo;
3622
3623        for (i = 0; i < sw_context->cur_reloc; ++i) {
3624                reloc = &sw_context->relocs[i];
3625                validate = &sw_context->val_bufs[reloc->index].base;
3626                bo = validate->bo;
3627                switch (bo->mem.mem_type) {
3628                case TTM_PL_VRAM:
3629                        reloc->location->offset += bo->offset;
3630                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3631                        break;
3632                case VMW_PL_GMR:
3633                        reloc->location->gmrId = bo->mem.start;
3634                        break;
3635                case VMW_PL_MOB:
3636                        *reloc->mob_loc = bo->mem.start;
3637                        break;
3638                default:
3639                        BUG();
3640                }
3641        }
3642        vmw_free_relocations(sw_context);
3643}
3644
3645/**
3646 * vmw_resource_list_unreference - Free up a resource list and unreference
3647 * all resources referenced by it.
3648 * @sw_context: The software context used for this command submission.
3649 * @list: The resource list.
3650 */
3651static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3652                                          struct list_head *list)
3653{
3654        struct vmw_resource_val_node *val, *val_next;
3655
3656        /*
3657         * Drop references to resources held during command submission.
3658         */
3659
3660        list_for_each_entry_safe(val, val_next, list, head) {
3661                list_del_init(&val->head);
3662                vmw_resource_unreference(&val->res);
3663
3664                if (val->staged_bindings) {
3665                        if (val->staged_bindings != sw_context->staged_bindings)
3666                                vmw_binding_state_free(val->staged_bindings);
3667                        else
3668                                sw_context->staged_bindings_inuse = false;
3669                        val->staged_bindings = NULL;
3670                }
3671
3672                kfree(val);
3673        }
3674}
3675
3676static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3677{
3678        struct vmw_validate_buffer *entry, *next;
3679        struct vmw_resource_val_node *val;
3680
3681        /*
3682         * Drop references to DMA buffers held during command submission.
3683         */
3684        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3685                                 base.head) {
3686                list_del(&entry->base.head);
3687                ttm_bo_unref(&entry->base.bo);
3688                (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3689                sw_context->cur_val_buf--;
3690        }
3691        BUG_ON(sw_context->cur_val_buf != 0);
3692
3693        list_for_each_entry(val, &sw_context->resource_list, head)
3694                (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3695}
3696
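    /**
     * vmw_validate_single_buffer - Validate the placement of a single
     * buffer object used by the command stream.
     *
     * @dev_priv: Pointer to a device private struct.
     * @bo: The buffer object to validate.
     * @interruptible: Whether to wait interruptibly.
     * @validate_as_mob: Whether the buffer must be placed as a MOB.
     */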
3697int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3698                               struct ttm_buffer_object *bo,
3699                               bool interruptible,
3700                               bool validate_as_mob)
3701{
3702        struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3703                                                  base);
3704        int ret;
3705
3706        if (vbo->pin_count > 0)
3707                return 0;
3708
3709        if (validate_as_mob)
3710                return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3711                                       false);
3712
3713        /*
3714         * Put BO in VRAM if there is space, otherwise as a GMR.
3715         * If there is no space in VRAM and GMR ids are all used up,
3716         * start evicting GMRs to make room. If the DMA buffer can't be
3717         * used as a GMR, this will return -ENOMEM.
3718         */
3719
3720        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3721                              false);
3722        if (likely(ret == 0 || ret == -ERESTARTSYS))
3723                return ret;
3724
3725        /*
3726         * If that failed, try VRAM again, this time evicting
3727         * previous contents.
3728         */
3729
3730        ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3731        return ret;
3732}
3733
3734static int vmw_validate_buffers(struct vmw_private *dev_priv,
3735                                struct vmw_sw_context *sw_context)
3736{
3737        struct vmw_validate_buffer *entry;
3738        int ret;
3739
3740        list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3741                ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3742                                                 true,
3743                                                 entry->validate_as_mob);
3744                if (unlikely(ret != 0))
3745                        return ret;
3746        }
3747        return 0;
3748}
3749
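    /**
     * vmw_resize_cmd_bounce - Ensure that the command bounce buffer is at
     * least @size bytes, growing it in roughly 1.5x, page-aligned steps.
     *
     * @sw_context: The software context owning the bounce buffer.
     * @size: The required buffer size in bytes.
     */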
3750static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3751                                 uint32_t size)
3752{
3753        if (likely(sw_context->cmd_bounce_size >= size))
3754                return 0;
3755
3756        if (sw_context->cmd_bounce_size == 0)
3757                sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3758
3759        while (sw_context->cmd_bounce_size < size) {
3760                sw_context->cmd_bounce_size =
3761                        PAGE_ALIGN(sw_context->cmd_bounce_size +
3762                                   (sw_context->cmd_bounce_size >> 1));
3763        }
3764
3765        vfree(sw_context->cmd_bounce);
3766        sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3767
3768        if (sw_context->cmd_bounce == NULL) {
3769                DRM_ERROR("Failed to allocate command bounce buffer.\n");
3770                sw_context->cmd_bounce_size = 0;
3771                return -ENOMEM;
3772        }
3773
3774        return 0;
3775}
3776
3777/**
3778 * vmw_execbuf_fence_commands - create and submit a command stream fence
3779 *
3780 * Creates a fence object and submits a command stream marker.
3781 * If this fails for some reason, we sync the fifo and set the returned
3782 * fence pointer to NULL. It is then safe to fence buffers with a NULL
3783 * pointer.
3784 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3785 * userspace handle is created for the fence; otherwise none is created.
3786 */
3787
3788int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3789                               struct vmw_private *dev_priv,
3790                               struct vmw_fence_obj **p_fence,
3791                               uint32_t *p_handle)
3792{
3793        uint32_t sequence;
3794        int ret;
3795        bool synced = false;
3796
3797        /* p_handle implies file_priv. */
3798        BUG_ON(p_handle != NULL && file_priv == NULL);
3799
3800        ret = vmw_fifo_send_fence(dev_priv, &sequence);
3801        if (unlikely(ret != 0)) {
3802                DRM_ERROR("Fence submission error. Syncing.\n");
3803                synced = true;
3804        }
3805
3806        if (p_handle != NULL)
3807                ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3808                                            sequence, p_fence, p_handle);
3809        else
3810                ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3811
3812        if (unlikely(ret != 0 && !synced)) {
3813                (void) vmw_fallback_wait(dev_priv, false, false,
3814                                         sequence, false,
3815                                         VMW_FENCE_WAIT_TIMEOUT);
3816                *p_fence = NULL;
3817        }
3818
3819        return 0;
3820}
3821
3822/**
3823 * vmw_execbuf_copy_fence_user - copy fence object information to
3824 * user-space.
3825 *
3826 * @dev_priv: Pointer to a vmw_private struct.
3827 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3828 * @ret: Return value from fence object creation.
3829 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3830 * which the information should be copied.
3831 * @fence: Pointer to the fence object.
3832 * @fence_handle: User-space fence handle.
3833 * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3834 * @sync_file:  Only used to clean up in case of an error in this function.
3835 *
3836 * This function copies fence information to user-space. If copying fails,
3837 * the user-space struct drm_vmw_fence_rep::error member should be left
3838 * untouched, and if user-space has preloaded it with -EFAULT, the error
3839 * can be detected there.
3840 * Also if copying fails, user-space will be unable to signal the fence
3841 * object so we wait for it immediately, and then unreference the
3842 * user-space reference.
3843 */
3844void
3845vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3846                            struct vmw_fpriv *vmw_fp,
3847                            int ret,
3848                            struct drm_vmw_fence_rep __user *user_fence_rep,
3849                            struct vmw_fence_obj *fence,
3850                            uint32_t fence_handle,
3851                            int32_t out_fence_fd,
3852                            struct sync_file *sync_file)
3853{
3854        struct drm_vmw_fence_rep fence_rep;
3855
3856        if (user_fence_rep == NULL)
3857                return;
3858
3859        memset(&fence_rep, 0, sizeof(fence_rep));
3860
3861        fence_rep.error = ret;
3862        fence_rep.fd = out_fence_fd;
3863        if (ret == 0) {
3864                BUG_ON(fence == NULL);
3865
3866                fence_rep.handle = fence_handle;
3867                fence_rep.seqno = fence->base.seqno;
3868                vmw_update_seqno(dev_priv, &dev_priv->fifo);
3869                fence_rep.passed_seqno = dev_priv->last_read_seqno;
3870        }
3871
3872        /*
3873         * copy_to_user errors will be detected by user space not
3874         * seeing fence_rep::error filled in. Typically
3875         * user-space would have pre-set that member to -EFAULT.
3876         */
3877        ret = copy_to_user(user_fence_rep, &fence_rep,
3878                           sizeof(fence_rep));
3879
3880        /*
3881         * User-space lost the fence object. We need to sync
3882         * and unreference the handle.
3883         */
3884        if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3885                if (sync_file)
3886                        fput(sync_file->file);
3887
3888                if (fence_rep.fd != -1) {
3889                        put_unused_fd(fence_rep.fd);
3890                        fence_rep.fd = -1;
3891                }
3892
3893                ttm_ref_object_base_unref(vmw_fp->tfile,
3894                                          fence_handle, TTM_REF_USAGE);
3895                DRM_ERROR("Fence copy error. Syncing.\n");
3896                (void) vmw_fence_obj_wait(fence, false, false,
3897                                          VMW_FENCE_WAIT_TIMEOUT);
3898        }
3899}
3900
3901/**
3902 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3903 * the fifo.
3904 *
3905 * @dev_priv: Pointer to a device private structure.
3906 * @kernel_commands: Pointer to the unpatched command batch.
3907 * @command_size: Size of the unpatched command batch.
3908 * @sw_context: Structure holding the relocation lists.
3909 *
3910 * Side effects: If this function returns 0, then the command batch
3911 * pointed to by @kernel_commands will have been modified.
3912 */
3913static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3914                                   void *kernel_commands,
3915                                   u32 command_size,
3916                                   struct vmw_sw_context *sw_context)
3917{
3918        void *cmd;
3919
3920        if (sw_context->dx_ctx_node)
3921                cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3922                                          sw_context->dx_ctx_node->res->id);
3923        else
3924                cmd = vmw_fifo_reserve(dev_priv, command_size);
3925        if (!cmd) {
3926                DRM_ERROR("Failed reserving fifo space for commands.\n");
3927                return -ENOMEM;
3928        }
3929
3930        vmw_apply_relocations(sw_context);
3931        memcpy(cmd, kernel_commands, command_size);
3932        vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3933        vmw_resource_relocations_free(&sw_context->res_relocations);
3934        vmw_fifo_commit(dev_priv, command_size);
3935
3936        return 0;
3937}
3938
3939/**
3940 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3941 * the command buffer manager.
3942 *
3943 * @dev_priv: Pointer to a device private structure.
3944 * @header: Opaque handle to the command buffer allocation.
3945 * @command_size: Size of the unpatched command batch.
3946 * @sw_context: Structure holding the relocation lists.
3947 *
3948 * Side effects: If this function returns 0, then the command buffer
3949 * represented by @header will have been modified.
3950 */
3951static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3952                                     struct vmw_cmdbuf_header *header,
3953                                     u32 command_size,
3954                                     struct vmw_sw_context *sw_context)
3955{
3956        u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3957                  SVGA3D_INVALID_ID);
3958        void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3959                                       id, false, header);
3960
3961        vmw_apply_relocations(sw_context);
3962        vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3963        vmw_resource_relocations_free(&sw_context->res_relocations);
3964        vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3965
3966        return 0;
3967}
3968
3969/**
3970 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3971 * submission using a command buffer.
3972 *
3973 * @dev_priv: Pointer to a device private structure.
3974 * @user_commands: User-space pointer to the commands to be submitted.
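     * @kernel_commands: Kernel-space pointer to the commands, or NULL if
     * they are to be copied from @user_commands.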
3975 * @command_size: Size of the unpatched command batch.
3976 * @header: Out parameter returning the opaque pointer to the command buffer.
3977 *
3978 * This function checks whether we can use the command buffer manager for
3979 * submission and if so, creates a command buffer of suitable size and
3980 * copies the user data into that buffer.
3981 *
3982 * On successful return, the function returns a pointer to the data in the
3983 * command buffer and *@header is set to non-NULL.
3984 * If command buffers could not be used, the function returns the value
3985 * of @kernel_commands as passed in. That value may be NULL. In that case,
3986 * the value of *@header will be set to NULL.
3987 * If an error is encountered, the function will return a pointer error value.
3988 * If the function is interrupted by a signal while sleeping, it will return
3989 * -ERESTARTSYS cast to a pointer error value.
3990 */
3991static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3992                                void __user *user_commands,
3993                                void *kernel_commands,
3994                                u32 command_size,
3995                                struct vmw_cmdbuf_header **header)
3996{
3997        size_t cmdbuf_size;
3998        int ret;
3999
4000        *header = NULL;
4001        if (command_size > SVGA_CB_MAX_SIZE) {
4002                DRM_ERROR("Command buffer is too large.\n");
4003                return ERR_PTR(-EINVAL);
4004        }
4005
4006        if (!dev_priv->cman || kernel_commands)
4007                return kernel_commands;
4008
4009        /* If possible, add a little space for fencing. */
4010        cmdbuf_size = command_size + 512;
4011        cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4012        kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
4013                                           true, header);
4014        if (IS_ERR(kernel_commands))
4015                return kernel_commands;
4016
4017        ret = copy_from_user(kernel_commands, user_commands,
4018                             command_size);
4019        if (ret) {
4020                DRM_ERROR("Failed copying commands.\n");
4021                vmw_cmdbuf_header_free(*header);
4022                *header = NULL;
4023                return ERR_PTR(-EFAULT);
4024        }
4025
4026        return kernel_commands;
4027}
4028
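    /**
     * vmw_execbuf_tie_context - Look up the DX context identified by
     * @handle and make it the active context for this command submission.
     *
     * @dev_priv: Pointer to a device private struct.
     * @sw_context: The software context being used for this batch.
     * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
     */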
4029static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4030                                   struct vmw_sw_context *sw_context,
4031                                   uint32_t handle)
4032{
4033        struct vmw_resource_val_node *ctx_node;
4034        struct vmw_resource *res;
4035        int ret;
4036
4037        if (handle == SVGA3D_INVALID_ID)
4038                return 0;
4039
4040        ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
4041                                              handle, user_context_converter,
4042                                              &res);
4043        if (unlikely(ret != 0)) {
4044                DRM_ERROR("Could not find or use DX context 0x%08x.\n",
4045                          (unsigned) handle);
4046                return ret;
4047        }
4048
4049        ret = vmw_resource_val_add(sw_context, res, &ctx_node);
4050        if (unlikely(ret != 0))
4051                goto out_err;
4052
4053        sw_context->dx_ctx_node = ctx_node;
4054        sw_context->man = vmw_context_res_man(res);
4055out_err:
4056        vmw_resource_unreference(&res);
4057        return ret;
4058}
4059
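/**
 * vmw_execbuf_process - Validate, patch and submit a command batch, and
 * optionally fence the submission.
 *
 * @file_priv: Pointer to the DRM file-private of the caller.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is used instead.
 * @kernel_commands: Kernel-space pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, throttle the submission to roughly this lag,
 * in microseconds, behind the device.
 * @dx_context_handle: Handle of the DX context to submit against, or
 * SVGA3D_INVALID_ID for none.
 * @user_fence_rep: Optional user-space address at which to report the fence
 * created for this submission.
 * @out_fence: If non-NULL, assigned a pointer to the created fence, which
 * the caller is then responsible for unreferencing.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * Return: Zero on success, negative error code on failure.
 */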
4060int vmw_execbuf_process(struct drm_file *file_priv,
4061                        struct vmw_private *dev_priv,
4062                        void __user *user_commands,
4063                        void *kernel_commands,
4064                        uint32_t command_size,
4065                        uint64_t throttle_us,
4066                        uint32_t dx_context_handle,
4067                        struct drm_vmw_fence_rep __user *user_fence_rep,
4068                        struct vmw_fence_obj **out_fence,
4069                        uint32_t flags)
4070{
4071        struct vmw_sw_context *sw_context = &dev_priv->ctx;
4072        struct vmw_fence_obj *fence = NULL;
4073        struct vmw_resource *error_resource;
4074        struct list_head resource_list;
4075        struct vmw_cmdbuf_header *header;
4076        struct ww_acquire_ctx ticket;
4077        uint32_t handle;
4078        int ret;
4079        int32_t out_fence_fd = -1;
4080        struct sync_file *sync_file = NULL;
4081
4083        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4084                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4085                if (out_fence_fd < 0) {
4086                        DRM_ERROR("Failed to get a fence file descriptor.\n");
4087                        return out_fence_fd;
4088                }
4089        }
4090
4091        if (throttle_us) {
4092                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4093                                   throttle_us);
4094
4095                if (ret)
4096                        goto out_free_fence_fd;
4097        }
4098
4099        kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4100                                             kernel_commands, command_size,
4101                                             &header);
4102        if (IS_ERR(kernel_commands)) {
4103                ret = PTR_ERR(kernel_commands);
4104                goto out_free_fence_fd;
4105        }
4106
4107        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4108        if (ret) {
4109                ret = -ERESTARTSYS;
4110                goto out_free_header;
4111        }
4112
4113        sw_context->kernel = false;
4114        if (kernel_commands == NULL) {
4115                ret = vmw_resize_cmd_bounce(sw_context, command_size);
4116                if (unlikely(ret != 0))
4117                        goto out_unlock;
4118
4120                ret = copy_from_user(sw_context->cmd_bounce,
4121                                     user_commands, command_size);
4122
4123                if (unlikely(ret != 0)) {
4124                        ret = -EFAULT;
4125                        DRM_ERROR("Failed copying commands.\n");
4126                        goto out_unlock;
4127                }
4128                kernel_commands = sw_context->cmd_bounce;
4129        } else if (!header)
4130                sw_context->kernel = true;
4131
4132        sw_context->fp = vmw_fpriv(file_priv);
4133        sw_context->cur_reloc = 0;
4134        sw_context->cur_val_buf = 0;
4135        INIT_LIST_HEAD(&sw_context->resource_list);
4136        INIT_LIST_HEAD(&sw_context->ctx_resource_list);
4137        sw_context->cur_query_bo = dev_priv->pinned_bo;
4138        sw_context->last_query_ctx = NULL;
4139        sw_context->needs_post_query_barrier = false;
4140        sw_context->dx_ctx_node = NULL;
4141        sw_context->dx_query_mob = NULL;
4142        sw_context->dx_query_ctx = NULL;
4143        memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4144        INIT_LIST_HEAD(&sw_context->validate_nodes);
4145        INIT_LIST_HEAD(&sw_context->res_relocations);
4146        if (sw_context->staged_bindings)
4147                vmw_binding_state_reset(sw_context->staged_bindings);
4148
4149        if (!sw_context->res_ht_initialized) {
4150                ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4151                if (unlikely(ret != 0))
4152                        goto out_unlock;
4153                sw_context->res_ht_initialized = true;
4154        }
4155        INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4156        INIT_LIST_HEAD(&resource_list);
4157        ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4158        if (unlikely(ret != 0)) {
4159                list_splice_init(&sw_context->ctx_resource_list,
4160                                 &sw_context->resource_list);
4161                goto out_err_nores;
4162        }
4163
4164        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4165                                command_size);
4166        /*
4167         * Merge the resource lists before checking the return status
4168         * from vmw_cmd_check_all so that all the open hashtabs will
4169         * be handled properly even if vmw_cmd_check_all fails.
4170         */
4171        list_splice_init(&sw_context->ctx_resource_list,
4172                         &sw_context->resource_list);
4173
4174        if (unlikely(ret != 0))
4175                goto out_err_nores;
4176
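        /*
         * Reserve all resources and their backing buffers, then validate:
         * buffers first, since resource validation may depend on the backing
         * buffers having reached their final placement.
         */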
4177        ret = vmw_resources_reserve(sw_context);
4178        if (unlikely(ret != 0))
4179                goto out_err_nores;
4180
4181        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4182                                     true, NULL);
4183        if (unlikely(ret != 0))
4184                goto out_err_nores;
4185
4186        ret = vmw_validate_buffers(dev_priv, sw_context);
4187        if (unlikely(ret != 0))
4188                goto out_err;
4189
4190        ret = vmw_resources_validate(sw_context);
4191        if (unlikely(ret != 0))
4192                goto out_err;
4193
4194        ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4195        if (unlikely(ret != 0)) {
4196                ret = -ERESTARTSYS;
4197                goto out_err;
4198        }
4199
4200        if (dev_priv->has_mob) {
4201                ret = vmw_rebind_contexts(sw_context);
4202                if (unlikely(ret != 0))
4203                        goto out_unlock_binding;
4204        }
4205
4206        if (!header) {
4207                ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4208                                              command_size, sw_context);
4209        } else {
4210                ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4211                                                sw_context);
4212                header = NULL;
4213        }
4214        mutex_unlock(&dev_priv->binding_mutex);
4215        if (ret)
4216                goto out_err;
4217
4218        vmw_query_bo_switch_commit(dev_priv, sw_context);
4219        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4220                                         &fence,
4221                                         (user_fence_rep) ? &handle : NULL);
4222        /*
4223         * This error is harmless, because if fence submission fails,
4224         * vmw_fifo_send_fence will sync. The error will be propagated to
4225         * user-space in @user_fence_rep.
4226         */
4227
4228        if (ret != 0)
4229                DRM_ERROR("Fence submission error. Syncing.\n");
4230
4231        vmw_resources_unreserve(sw_context, false);
4232
4233        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4234                                    (void *) fence);
4235
4236        if (unlikely(dev_priv->pinned_bo != NULL &&
4237                     !dev_priv->query_cid_valid))
4238                __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4239
4240        vmw_clear_validations(sw_context);
4241
4242        /*
4243         * If anything fails here, give up trying to export the fence
4244         * and do a sync since the user mode will not be able to sync
4245         * the fence itself.  This ensures we are still functionally
4246         * correct.
4247         */
4248        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4250                sync_file = sync_file_create(&fence->base);
4251                if (!sync_file) {
4252                        DRM_ERROR("Unable to create sync file for fence.\n");
4253                        put_unused_fd(out_fence_fd);
4254                        out_fence_fd = -1;
4255
4256                        (void) vmw_fence_obj_wait(fence, false, false,
4257                                                  VMW_FENCE_WAIT_TIMEOUT);
4258                } else {
4259                        /* Link the fence with the FD created earlier */
4260                        fd_install(out_fence_fd, sync_file->file);
4261                }
4262        }
4263
4264        vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4265                                    user_fence_rep, fence, handle,
4266                                    out_fence_fd, sync_file);
4267
4268        /* Don't unreference when handing fence out */
4269        if (unlikely(out_fence != NULL)) {
4270                *out_fence = fence;
4271                fence = NULL;
4272        } else if (likely(fence != NULL)) {
4273                vmw_fence_obj_unreference(&fence);
4274        }
4275
4276        list_splice_init(&sw_context->resource_list, &resource_list);
4277        vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4278        mutex_unlock(&dev_priv->cmdbuf_mutex);
4279
4280        /*
4281         * Unreference resources outside of the cmdbuf_mutex to
4282         * avoid deadlocks in resource destruction paths.
4283         */
4284        vmw_resource_list_unreference(sw_context, &resource_list);
4285
4286        return 0;
4287
4288out_unlock_binding:
4289        mutex_unlock(&dev_priv->binding_mutex);
4290out_err:
4291        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4292out_err_nores:
4293        vmw_resources_unreserve(sw_context, true);
4294        vmw_resource_relocations_free(&sw_context->res_relocations);
4295        vmw_free_relocations(sw_context);
4296        vmw_clear_validations(sw_context);
4297        if (unlikely(dev_priv->pinned_bo != NULL &&
4298                     !dev_priv->query_cid_valid))
4299                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4300out_unlock:
4301        list_splice_init(&sw_context->resource_list, &resource_list);
4302        error_resource = sw_context->error_resource;
4303        sw_context->error_resource = NULL;
4304        vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4305        mutex_unlock(&dev_priv->cmdbuf_mutex);
4306
4307        /*
4308         * Unreference resources outside of the cmdbuf_mutex to
4309         * avoid deadlocks in resource destruction paths.
4310         */
4311        vmw_resource_list_unreference(sw_context, &resource_list);
4312        if (unlikely(error_resource != NULL))
4313                vmw_resource_unreference(&error_resource);
4314out_free_header:
4315        if (header)
4316                vmw_cmdbuf_header_free(header);
4317out_free_fence_fd:
4318        if (out_fence_fd >= 0)
4319                put_unused_fd(out_fence_fd);
4320
4321        return ret;
4322}
4323
4324/**
4325 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4326 *
4327 * @dev_priv: The device private structure.
4328 *
4329 * This function is called to idle the fifo and unpin the query buffer
4330 * if the normal way to do this hits an error, which should typically be
4331 * extremely rare.
4332 */
4333static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4334{
4335        DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4336
4337        (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4338        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4339        if (dev_priv->dummy_query_bo_pinned) {
4340                vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4341                dev_priv->dummy_query_bo_pinned = false;
4342        }
4343}
4344
4346/**
4347 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4348 * query bo.
4349 *
4350 * @dev_priv: The device private structure.
4351 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4352 * _after_ a query barrier that flushes all queries touching the current
4353 * buffer pointed to by @dev_priv->pinned_bo
4354 *
4355 * This function should be used to unpin the pinned query bo, or
4356 * as a query barrier when we need to make sure that all queries have
4357 * finished before the next fifo command. (For example on hardware
4358 * context destructions where the hardware may otherwise leak unfinished
4359 * queries).
4360 *
4361 * This function does not return any failure codes, but makes attempts
4362 * to unpin safely in case of errors.
4363 *
4364 * The function will synchronize on the previous query barrier, and will
4365 * thus not finish until that barrier has executed.
4366 *
4367 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4368 * before calling this function.
4369 */
4370void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4371                                     struct vmw_fence_obj *fence)
4372{
4373        int ret = 0;
4374        struct list_head validate_list;
4375        struct ttm_validate_buffer pinned_val, query_val;
4376        struct vmw_fence_obj *lfence = NULL;
4377        struct ww_acquire_ctx ticket;
4378
4379        if (dev_priv->pinned_bo == NULL)
4380                goto out_unlock;
4381
4382        INIT_LIST_HEAD(&validate_list);
4383
4384        pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4385        pinned_val.shared = false;
4386        list_add_tail(&pinned_val.head, &validate_list);
4387
4388        query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4389        query_val.shared = false;
4390        list_add_tail(&query_val.head, &validate_list);
4391
4392        ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4393                                     false, NULL);
4394        if (unlikely(ret != 0)) {
4395                vmw_execbuf_unpin_panic(dev_priv);
4396                goto out_no_reserve;
4397        }
4398
4399        if (dev_priv->query_cid_valid) {
4400                BUG_ON(fence != NULL);
4401                ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4402                if (unlikely(ret != 0)) {
4403                        vmw_execbuf_unpin_panic(dev_priv);
4404                        goto out_no_emit;
4405                }
4406                dev_priv->query_cid_valid = false;
4407        }
4408
4409        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4410        if (dev_priv->dummy_query_bo_pinned) {
4411                vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4412                dev_priv->dummy_query_bo_pinned = false;
4413        }
4414        if (fence == NULL) {
4415                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4416                                                  NULL);
4417                fence = lfence;
4418        }
4419        ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4420        if (lfence != NULL)
4421                vmw_fence_obj_unreference(&lfence);
4422
4423        ttm_bo_unref(&query_val.bo);
4424        ttm_bo_unref(&pinned_val.bo);
4425        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4426out_unlock:
4427        return;
4428
4429out_no_emit:
4430        ttm_eu_backoff_reservation(&ticket, &validate_list);
4431out_no_reserve:
4432        ttm_bo_unref(&query_val.bo);
4433        ttm_bo_unref(&pinned_val.bo);
4434        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4435}
4436
4437/**
4438 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4439 * query bo.
4440 *
4441 * @dev_priv: The device private structure.
4442 *
4443 * This function should be used to unpin the pinned query bo, or
4444 * as a query barrier when we need to make sure that all queries have
4445 * finished before the next fifo command. (For example on hardware
4446 * context destructions where the hardware may otherwise leak unfinished
4447 * queries).
4448 *
4449 * This function does not return any failure codes, but makes attempts
4450 * to unpin safely in case of errors.
4451 *
4452 * The function will synchronize on the previous query barrier, and will
4453 * thus not finish until that barrier has executed.
4454 */
4455void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4456{
4457        mutex_lock(&dev_priv->cmdbuf_mutex);
4458        if (dev_priv->query_cid_valid)
4459                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4460        mutex_unlock(&dev_priv->cmdbuf_mutex);
4461}
4462
4463int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4464                      struct drm_file *file_priv, size_t size)
4465{
4466        struct vmw_private *dev_priv = vmw_priv(dev);
4467        struct drm_vmw_execbuf_arg arg;
4468        int ret;
4469        static const size_t copy_offset[] = {
4470                offsetof(struct drm_vmw_execbuf_arg, context_handle),
4471                sizeof(struct drm_vmw_execbuf_arg)};
4472        struct dma_fence *in_fence = NULL;
4473
4474        if (unlikely(size < copy_offset[0])) {
4475                DRM_ERROR("Invalid command size, ioctl %d\n",
4476                          DRM_VMW_EXECBUF);
4477                return -EINVAL;
4478        }
4479
4480        if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4481                return -EFAULT;
4482
4483        /*
4484         * Extend the ioctl argument while maintaining backwards
4485         * compatibility: we take different code paths depending on the
4486         * value of arg.version.
4488         */
4489
4490        if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4491                     arg.version == 0)) {
4492                DRM_ERROR("Incorrect execbuf version.\n");
4493                return -EINVAL;
4494        }
4495
4496        if (arg.version > 1 &&
4497            copy_from_user(&arg.context_handle,
4498                           (void __user *) (data + copy_offset[0]),
4499                           copy_offset[arg.version - 1] -
4500                           copy_offset[0]) != 0)
4501                return -EFAULT;
4502
4503        switch (arg.version) {
4504        case 1:
4505                arg.context_handle = (uint32_t) -1;
4506                break;
4507        case 2:
4508        default:
4509                break;
4510        }
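
        /*
         * Note on the versioned copy above: a version 1 client supplies only
         * the fields preceding @context_handle (copy_offset[0] bytes), while
         * a version 2 client supplies the full argument struct, whose
         * remaining fields are copied in a second step.
         */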
4511
4513        /* If a fence FD was imported from elsewhere, wait on it. */
4514        if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4515                in_fence = sync_file_get_fence(arg.imported_fence_fd);
4516
4517                if (!in_fence) {
4518                        DRM_ERROR("Cannot get imported fence.\n");
4519                        return -EINVAL;
4520                }
4521
4522                ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4523                if (ret)
4524                        goto out;
4525        }
4526
4527        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4528        if (unlikely(ret != 0))
4529                return ret;
4530
4531        ret = vmw_execbuf_process(file_priv, dev_priv,
4532                                  (void __user *)(unsigned long)arg.commands,
4533                                  NULL, arg.command_size, arg.throttle_us,
4534                                  arg.context_handle,
4535                                  (void __user *)(unsigned long)arg.fence_rep,
4536                                  NULL,
4537                                  arg.flags);
4538        ttm_read_unlock(&dev_priv->reservation_sem);
4539        if (unlikely(ret != 0))
4540                goto out;
4541
4542        vmw_kms_cursor_post_execbuf(dev_priv);
4543
4544out:
4545        if (in_fence)
4546                dma_fence_put(in_fence);
4547        return ret;
4548}
4549
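/*
 * Illustrative user-space sketch (not part of this file, hence under #if 0):
 * submitting a command batch through DRM_VMW_EXECBUF with the completion
 * fence exported as a sync-file FD. Assumes libdrm and the vmwgfx uapi
 * header; the function name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int submit_with_fence_fd(int drm_fd, void *cmds, uint32_t size,
				struct drm_vmw_fence_rep *rep)
{
	struct drm_vmw_execbuf_arg arg = {
		.commands = (uint64_t)(unsigned long)cmds,
		.command_size = size,
		.fence_rep = (uint64_t)(unsigned long)rep,
		.version = DRM_VMW_EXECBUF_VERSION,
		.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD,
		.context_handle = (uint32_t)-1, /* No DX context. */
	};
	int ret;

	ret = drmCommandWriteRead(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	/* On success, rep->fd holds a pollable sync-file FD (or -1). */
	return ret;
}
#endif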