linux/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

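/*
 * Log2 of the number of hash buckets in the software context's lookup
 * table, which maps resource and buffer-object pointers to their
 * validation nodes.
 */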
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4-byte command buffer entries, to where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: The resource does not need to allocate a backup
 * buffer on reservation; the command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Callback invoked to verify the command.
 * @user_allow: Whether the command is allowed from the execbuf ioctl.
 * @gb_disable: Whether the command is disabled if guest-backed objects are
 * available.
 * @gb_enable: Whether the command is enabled only if guest-backed objects
 * are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

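/*
 * Illustrative sketch of how VMW_CMD_DEF is meant to be used: entries are
 * placed in a table indexed by SVGA3D command id, so the verifier can look
 * up a command's handler and permission bits in O(1). The table name and
 * the specific entries below are assumptions for illustration, not a
 * verbatim excerpt of this file:
 *
 * static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
 *		    &vmw_cmd_set_render_target_check, true, false, false),
 * };
 */
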
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
			val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

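/**
 * vmw_cmd_invalid - Verifier callback for commands that may not appear in
 * the command stream. Always returns a non-zero value (1 for CAP_SYS_ADMIN
 * callers, -EINVAL otherwise).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */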
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

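/**
 * vmw_cmd_ok - Verifier callback for commands that need no checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */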
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is exceeded.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}


/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	if (p_val)
		*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (res_type == vmw_res_context && dev_priv->has_mob &&
	    node->first_usage) {

		/*
		 * Put contexts first on the list to be able to exit
		 * list traversal for contexts early.
		 */
		list_del(&node->head);
		list_add(&node->head, &sw_context->resource_list);

		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			return ret;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	if (p_val)
		*p_val = node;

	return 0;
}


/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

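/**
 * vmw_cmd_set_render_target_check - Validate an SVGA_3D_CMD_SETRENDERTARGET
 * command and, when guest-backed objects are in use, stage the new
 * render-target binding on the context.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */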
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

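/**
 * vmw_cmd_surface_copy_check - Validate an SVGA_3D_CMD_SURFACE_COPY command
 * by checking both the source and destination surfaces.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */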
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

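/**
 * vmw_cmd_stretch_blt_check - Validate an SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command by checking both the source and destination surfaces.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */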
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

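/**
 * vmw_cmd_blt_surf_screen_check - Validate an
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command by checking the source surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */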
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

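/**
 * vmw_cmd_present_check - Validate an SVGA_3D_CMD_PRESENT command by
 * checking the surface being presented.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */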
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and whether another buffer is currently pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission "
			  "exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission "
			  "exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

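/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command: check the DMA
 * suffix, translate the guest pointer, clamp the transfer to the backing
 * buffer object and look up the destination surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */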
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		ret = -EINVAL;
		goto out_no_surface;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("Could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

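/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command by
 * bounds-checking the vertex declaration and primitive range arrays and
 * checking each surface they reference.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */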
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

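/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command,
 * checking each bound texture surface and, when guest-backed objects are
 * in use, staging the texture bindings on the context.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */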
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

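/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
 * by translating the guest pointer to the framebuffer's backing buffer.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 */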
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

1410/**
1411 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1412 *
1413 * @dev_priv: Pointer to a device private struct.
1414 * @sw_context: The software context being used for this batch.
1415 * @res_type: The resource type.
1416 * @converter: Information about user-space binding for this resource type.
1417 * @res_id: Pointer to the user-space resource handle in the command stream.
1418 * @buf_id: Pointer to the user-space backup buffer handle in the command
1419 * stream.
1420 * @backup_offset: Offset of backup into MOB.
1421 *
1422 * This function prepares for registering a switch of backup buffers
1423 * in the resource metadata just prior to unreserving.
1424 */
1425static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1426                                 struct vmw_sw_context *sw_context,
1427                                 enum vmw_res_type res_type,
1428                                 const struct vmw_user_resource_conv
1429                                 *converter,
1430                                 uint32_t *res_id,
1431                                 uint32_t *buf_id,
1432                                 unsigned long backup_offset)
1433{
1434        int ret;
1435        struct vmw_dma_buffer *dma_buf;
1436        struct vmw_resource_val_node *val_node;
1437
1438        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1439                                converter, res_id, &val_node);
1440        if (unlikely(ret != 0))
1441                return ret;
1442
1443        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1444        if (unlikely(ret != 0))
1445                return ret;
1446
1447        if (val_node->first_usage)
1448                val_node->no_buffer_needed = true;
1449
1450        vmw_dmabuf_unreference(&val_node->new_backup);
1451        val_node->new_backup = dma_buf;
1452        val_node->new_backup_offset = backup_offset;
1453
1454        return 0;
1455}
1456
1457/**
1458 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1459 * command
1460 *
1461 * @dev_priv: Pointer to a device private struct.
1462 * @sw_context: The software context being used for this batch.
1463 * @header: Pointer to the command header in the command stream.
1464 */
1465static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1466                                   struct vmw_sw_context *sw_context,
1467                                   SVGA3dCmdHeader *header)
1468{
1469        struct vmw_bind_gb_surface_cmd {
1470                SVGA3dCmdHeader header;
1471                SVGA3dCmdBindGBSurface body;
1472        } *cmd;
1473
1474        cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1475
1476        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1477                                     user_surface_converter,
1478                                     &cmd->body.sid, &cmd->body.mobid,
1479                                     0);
1480}
1481
1482/**
1483 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1484 * command
1485 *
1486 * @dev_priv: Pointer to a device private struct.
1487 * @sw_context: The software context being used for this batch.
1488 * @header: Pointer to the command header in the command stream.
1489 */
1490static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1491                                   struct vmw_sw_context *sw_context,
1492                                   SVGA3dCmdHeader *header)
1493{
1494        struct vmw_gb_surface_cmd {
1495                SVGA3dCmdHeader header;
1496                SVGA3dCmdUpdateGBImage body;
1497        } *cmd;
1498
1499        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1500
1501        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1502                                 user_surface_converter,
1503                                 &cmd->body.image.sid, NULL);
1504}
1505
1506/**
1507 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1508 * command
1509 *
1510 * @dev_priv: Pointer to a device private struct.
1511 * @sw_context: The software context being used for this batch.
1512 * @header: Pointer to the command header in the command stream.
1513 */
1514static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1515                                     struct vmw_sw_context *sw_context,
1516                                     SVGA3dCmdHeader *header)
1517{
1518        struct vmw_gb_surface_cmd {
1519                SVGA3dCmdHeader header;
1520                SVGA3dCmdUpdateGBSurface body;
1521        } *cmd;
1522
1523        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1524
1525        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1526                                 user_surface_converter,
1527                                 &cmd->body.sid, NULL);
1528}
1529
1530/**
1531 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1532 * command
1533 *
1534 * @dev_priv: Pointer to a device private struct.
1535 * @sw_context: The software context being used for this batch.
1536 * @header: Pointer to the command header in the command stream.
1537 */
1538static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1539                                     struct vmw_sw_context *sw_context,
1540                                     SVGA3dCmdHeader *header)
1541{
1542        struct vmw_gb_surface_cmd {
1543                SVGA3dCmdHeader header;
1544                SVGA3dCmdReadbackGBImage body;
1545        } *cmd;
1546
1547        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1548
1549        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1550                                 user_surface_converter,
1551                                 &cmd->body.image.sid, NULL);
1552}
1553
1554/**
1555 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1556 * command
1557 *
1558 * @dev_priv: Pointer to a device private struct.
1559 * @sw_context: The software context being used for this batch.
1560 * @header: Pointer to the command header in the command stream.
1561 */
1562static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1563                                       struct vmw_sw_context *sw_context,
1564                                       SVGA3dCmdHeader *header)
1565{
1566        struct vmw_gb_surface_cmd {
1567                SVGA3dCmdHeader header;
1568                SVGA3dCmdReadbackGBSurface body;
1569        } *cmd;
1570
1571        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1572
1573        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1574                                 user_surface_converter,
1575                                 &cmd->body.sid, NULL);
1576}
1577
1578/**
1579 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1580 * command
1581 *
1582 * @dev_priv: Pointer to a device private struct.
1583 * @sw_context: The software context being used for this batch.
1584 * @header: Pointer to the command header in the command stream.
1585 */
1586static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1587                                       struct vmw_sw_context *sw_context,
1588                                       SVGA3dCmdHeader *header)
1589{
1590        struct vmw_gb_surface_cmd {
1591                SVGA3dCmdHeader header;
1592                SVGA3dCmdInvalidateGBImage body;
1593        } *cmd;
1594
1595        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1596
1597        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1598                                 user_surface_converter,
1599                                 &cmd->body.image.sid, NULL);
1600}
1601
1602/**
1603 * vmw_cmd_invalidate_gb_surface - Validate an
1604 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1605 *
1606 * @dev_priv: Pointer to a device private struct.
1607 * @sw_context: The software context being used for this batch.
1608 * @header: Pointer to the command header in the command stream.
1609 */
1610static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1611                                         struct vmw_sw_context *sw_context,
1612                                         SVGA3dCmdHeader *header)
1613{
1614        struct vmw_gb_surface_cmd {
1615                SVGA3dCmdHeader header;
1616                SVGA3dCmdInvalidateGBSurface body;
1617        } *cmd;
1618
1619        cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1620
1621        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1622                                 user_surface_converter,
1623                                 &cmd->body.sid, NULL);
1624}
1625
1626
1627/**
1628 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1629 * command
1630 *
1631 * @dev_priv: Pointer to a device private struct.
1632 * @sw_context: The software context being used for this batch.
1633 * @header: Pointer to the command header in the command stream.
1634 */
1635static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1636                                 struct vmw_sw_context *sw_context,
1637                                 SVGA3dCmdHeader *header)
1638{
1639        struct vmw_shader_define_cmd {
1640                SVGA3dCmdHeader header;
1641                SVGA3dCmdDefineShader body;
1642        } *cmd;
1643        int ret;
1644        size_t size;
1645        struct vmw_resource_val_node *val;
1646
1647        cmd = container_of(header, struct vmw_shader_define_cmd,
1648                           header);
1649
1650        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1651                                user_context_converter, &cmd->body.cid,
1652                                &val);
1653        if (unlikely(ret != 0))
1654                return ret;
1655
1656        if (unlikely(!dev_priv->has_mob))
1657                return 0;
1658
1659        size = cmd->header.size - sizeof(cmd->body);
1660        ret = vmw_compat_shader_add(dev_priv,
1661                                    vmw_context_res_man(val->res),
1662                                    cmd->body.shid, cmd + 1,
1663                                    cmd->body.type, size,
1664                                    &sw_context->staged_cmd_res);
1665        if (unlikely(ret != 0))
1666                return ret;
1667
1668        return vmw_resource_relocation_add(&sw_context->res_relocations,
1669                                           NULL, &cmd->header.id -
1670                                           sw_context->buf_start);
1673}
1674
1675/**
1676 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1677 * command
1678 *
1679 * @dev_priv: Pointer to a device private struct.
1680 * @sw_context: The software context being used for this batch.
1681 * @header: Pointer to the command header in the command stream.
1682 */
1683static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1684                                  struct vmw_sw_context *sw_context,
1685                                  SVGA3dCmdHeader *header)
1686{
1687        struct vmw_shader_destroy_cmd {
1688                SVGA3dCmdHeader header;
1689                SVGA3dCmdDestroyShader body;
1690        } *cmd;
1691        int ret;
1692        struct vmw_resource_val_node *val;
1693
1694        cmd = container_of(header, struct vmw_shader_destroy_cmd,
1695                           header);
1696
1697        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1698                                user_context_converter, &cmd->body.cid,
1699                                &val);
1700        if (unlikely(ret != 0))
1701                return ret;
1702
1703        if (unlikely(!dev_priv->has_mob))
1704                return 0;
1705
1706        ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1707                                       cmd->body.shid,
1708                                       cmd->body.type,
1709                                       &sw_context->staged_cmd_res);
1710        if (unlikely(ret != 0))
1711                return ret;
1712
1713        return vmw_resource_relocation_add(&sw_context->res_relocations,
1714                                           NULL, &cmd->header.id -
1715                                           sw_context->buf_start);
1718}
1719
1720/**
1721 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1722 * command
1723 *
1724 * @dev_priv: Pointer to a device private struct.
1725 * @sw_context: The software context being used for this batch.
1726 * @header: Pointer to the command header in the command stream.
1727 */
1728static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1729                              struct vmw_sw_context *sw_context,
1730                              SVGA3dCmdHeader *header)
1731{
1732        struct vmw_set_shader_cmd {
1733                SVGA3dCmdHeader header;
1734                SVGA3dCmdSetShader body;
1735        } *cmd;
1736        struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1737        struct vmw_ctx_bindinfo bi;
1738        struct vmw_resource *res = NULL;
1739        int ret;
1740
1741        cmd = container_of(header, struct vmw_set_shader_cmd,
1742                           header);
1743
1744        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1745                                user_context_converter, &cmd->body.cid,
1746                                &ctx_node);
1747        if (unlikely(ret != 0))
1748                return ret;
1749
1750        if (!dev_priv->has_mob)
1751                return 0;
1752
1753        if (cmd->body.shid != SVGA3D_INVALID_ID) {
1754                res = vmw_compat_shader_lookup
1755                        (vmw_context_res_man(ctx_node->res),
1756                         cmd->body.shid,
1757                         cmd->body.type);
1758
1759                if (!IS_ERR(res)) {
1760                        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1761                                                    vmw_res_shader,
1762                                                    &cmd->body.shid, res,
1763                                                    &res_node);
1764                        vmw_resource_unreference(&res);
1765                        if (unlikely(ret != 0))
1766                                return ret;
1767                }
1768        }
1769
1770        if (!res_node) {
1771                ret = vmw_cmd_res_check(dev_priv, sw_context,
1772                                        vmw_res_shader,
1773                                        user_shader_converter,
1774                                        &cmd->body.shid, &res_node);
1775                if (unlikely(ret != 0))
1776                        return ret;
1777        }
1778
1779        bi.ctx = ctx_node->res;
1780        bi.res = res_node ? res_node->res : NULL;
1781        bi.bt = vmw_ctx_binding_shader;
1782        bi.i1.shader_type = cmd->body.type;
1783        return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1784}
1785
1786/**
1787 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1788 * command
1789 *
1790 * @dev_priv: Pointer to a device private struct.
1791 * @sw_context: The software context being used for this batch.
1792 * @header: Pointer to the command header in the command stream.
1793 */
1794static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1795                                    struct vmw_sw_context *sw_context,
1796                                    SVGA3dCmdHeader *header)
1797{
1798        struct vmw_set_shader_const_cmd {
1799                SVGA3dCmdHeader header;
1800                SVGA3dCmdSetShaderConst body;
1801        } *cmd;
1802        int ret;
1803
1804        cmd = container_of(header, struct vmw_set_shader_const_cmd,
1805                           header);
1806
1807        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1808                                user_context_converter, &cmd->body.cid,
1809                                NULL);
1810        if (unlikely(ret != 0))
1811                return ret;
1812
1813        if (dev_priv->has_mob)
1814                header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1815
1816        return 0;
1817}
1818
1819/**
1820 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1821 * command
1822 *
1823 * @dev_priv: Pointer to a device private struct.
1824 * @sw_context: The software context being used for this batch.
1825 * @header: Pointer to the command header in the command stream.
1826 */
1827static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1828                                  struct vmw_sw_context *sw_context,
1829                                  SVGA3dCmdHeader *header)
1830{
1831        struct vmw_bind_gb_shader_cmd {
1832                SVGA3dCmdHeader header;
1833                SVGA3dCmdBindGBShader body;
1834        } *cmd;
1835
1836        cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1837                           header);
1838
1839        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1840                                     user_shader_converter,
1841                                     &cmd->body.shid, &cmd->body.mobid,
1842                                     cmd->body.offsetInBytes);
1843}
1844
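/**
 * vmw_cmd_check_not_3d - Validate a non-3D (2D fifo) command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: Bytes remaining in the command stream.
 *        Out: Size of the parsed command in bytes.
 *
 * Computes the fixed size of the 2D command, checks that it fits in
 * the remaining command stream and that the caller is privileged, and
 * hands SVGA_CMD_DEFINE_GMRFB off for guest-pointer translation.
 */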
1845static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1846                                struct vmw_sw_context *sw_context,
1847                                void *buf, uint32_t *size)
1848{
1849        uint32_t size_remaining = *size;
1850        uint32_t cmd_id;
1851
1852        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1853        switch (cmd_id) {
1854        case SVGA_CMD_UPDATE:
1855                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1856                break;
1857        case SVGA_CMD_DEFINE_GMRFB:
1858                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1859                break;
1860        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1861                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1862                break;
1863        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1864                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1865                break;
1866        default:
1867                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1868                return -EINVAL;
1869        }
1870
1871        if (*size > size_remaining) {
1872                DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
1873                          cmd_id);
1874                return -EINVAL;
1875        }
1876
1877        if (unlikely(!sw_context->kernel)) {
1878                DRM_ERROR("Kernel-only SVGA command: %u.\n", cmd_id);
1879                return -EPERM;
1880        }
1881
1882        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1883                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1884
1885        return 0;
1886}
1887
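/*
 * Per-command dispatch table, indexed by SVGA3D command id relative to
 * SVGA_3D_CMD_BASE. The three booleans in each VMW_CMD_DEF entry
 * correspond to the user_allow, gb_disable and gb_enable members of
 * struct vmw_cmd_entry: whether the command is allowed from the execbuf
 * ioctl, and whether it is disallowed or required, respectively, when
 * the device has guest-backed objects.
 */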
1888static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1889        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1890                    false, false, false),
1891        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1892                    false, false, false),
1893        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1894                    true, false, false),
1895        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1896                    true, false, false),
1897        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1898                    true, false, false),
1899        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1900                    false, false, false),
1901        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1902                    false, false, false),
1903        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1904                    true, false, false),
1905        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1906                    true, false, false),
1907        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1908                    true, false, false),
1909        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1910                    &vmw_cmd_set_render_target_check, true, false, false),
1911        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1912                    true, false, false),
1913        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1914                    true, false, false),
1915        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1916                    true, false, false),
1917        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1918                    true, false, false),
1919        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1920                    true, false, false),
1921        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1922                    true, false, false),
1923        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1924                    true, false, false),
1925        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1926                    false, false, false),
1927        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1928                    true, false, false),
1929        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1930                    true, false, false),
1931        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1932                    true, false, false),
1933        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1934                    true, false, false),
1935        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1936                    true, false, false),
1937        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1938                    true, false, false),
1939        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1940                    true, false, false),
1941        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1942                    true, false, false),
1943        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1944                    true, false, false),
1945        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1946                    true, false, false),
1947        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1948                    &vmw_cmd_blt_surf_screen_check, false, false, false),
1949        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1950                    false, false, false),
1951        VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1952                    false, false, false),
1953        VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1954                    false, false, false),
1955        VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1956                    false, false, false),
1957        VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1958                    false, false, false),
1959        VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1960                    false, false, false),
1961        VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1962                    false, false, false),
1963        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1964                    false, false, false),
1965        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1966                    false, false, false),
1967        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1968                    false, false, false),
1969        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1970                    false, false, false),
1971        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1972                    false, false, false),
1973        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1974                    false, false, false),
1975        VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1976                    false, false, true),
1977        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1978                    false, false, true),
1979        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1980                    false, false, true),
1981        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1982                    false, false, true),
1983        VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1984                    false, false, true),
1985        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1986                    false, false, true),
1987        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1988                    false, false, true),
1989        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1990                    false, false, true),
1991        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1992                    true, false, true),
1993        VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1994                    false, false, true),
1995        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1996                    true, false, true),
1997        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1998                    &vmw_cmd_update_gb_surface, true, false, true),
1999        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2000                    &vmw_cmd_readback_gb_image, true, false, true),
2001        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2002                    &vmw_cmd_readback_gb_surface, true, false, true),
2003        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2004                    &vmw_cmd_invalidate_gb_image, true, false, true),
2005        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2006                    &vmw_cmd_invalidate_gb_surface, true, false, true),
2007        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2008                    false, false, true),
2009        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2010                    false, false, true),
2011        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2012                    false, false, true),
2013        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2014                    false, false, true),
2015        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2016                    false, false, true),
2017        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2018                    false, false, true),
2019        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2020                    true, false, true),
2021        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2022                    false, false, true),
2023        VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2024                    false, false, false),
2025        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2026                    true, false, true),
2027        VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2028                    true, false, true),
2029        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2030                    true, false, true),
2031        VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2032                    true, false, true),
2033        VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2034                    false, false, true),
2035        VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2036                    false, false, true),
2037        VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2038                    false, false, true),
2039        VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2040                    false, false, true),
2041        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2042                    false, false, true),
2043        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2044                    false, false, true),
2045        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2046                    false, false, true),
2047        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2048                    false, false, true),
2049        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2050                    false, false, true),
2051        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2052                    false, false, true),
2053        VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2054                    true, false, true)
2055};
2056
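/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: Bytes remaining in the command stream.
 *        Out: Size of the parsed command in bytes.
 *
 * Dispatches 2D commands to vmw_cmd_check_not_3d() and looks up 3D
 * commands in vmw_cmd_entries[], enforcing the per-command privilege
 * and guest-backed-object restrictions before calling the command's
 * verifier function.
 */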
2057static int vmw_cmd_check(struct vmw_private *dev_priv,
2058                         struct vmw_sw_context *sw_context,
2059                         void *buf, uint32_t *size)
2060{
2061        uint32_t cmd_id;
2062        uint32_t size_remaining = *size;
2063        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2064        int ret;
2065        const struct vmw_cmd_entry *entry;
2066        bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2067
2068        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2069        /* Handle any non-3D commands. */
2070        if (unlikely(cmd_id < SVGA_CMD_MAX))
2071                return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2072
2074        cmd_id = le32_to_cpu(header->id);
2075        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2076
2077        cmd_id -= SVGA_3D_CMD_BASE;
2078        if (unlikely(*size > size_remaining))
2079                goto out_invalid;
2080
2081        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2082                goto out_invalid;
2083
2084        entry = &vmw_cmd_entries[cmd_id];
2085        if (unlikely(!entry->func))
2086                goto out_invalid;
2087
2088        if (unlikely(!entry->user_allow && !sw_context->kernel))
2089                goto out_privileged;
2090
2091        if (unlikely(entry->gb_disable && gb))
2092                goto out_old;
2093
2094        if (unlikely(entry->gb_enable && !gb))
2095                goto out_new;
2096
2097        ret = entry->func(dev_priv, sw_context, header);
2098        if (unlikely(ret != 0))
2099                goto out_invalid;
2100
2101        return 0;
2102out_invalid:
2103        DRM_ERROR("Invalid SVGA3D command: %d\n",
2104                  cmd_id + SVGA_3D_CMD_BASE);
2105        return -EINVAL;
2106out_privileged:
2107        DRM_ERROR("Privileged SVGA3D command: %d\n",
2108                  cmd_id + SVGA_3D_CMD_BASE);
2109        return -EPERM;
2110out_old:
2111        DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2112                  cmd_id + SVGA_3D_CMD_BASE);
2113        return -EINVAL;
2114out_new:
2115        DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2116                  cmd_id + SVGA_3D_CMD_BASE);
2117        return -EINVAL;
2118}
2119
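/**
 * vmw_cmd_check_all - Validate all commands in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the command stream, validating one command at a time until the
 * stream is exhausted. The trailing residue check is a sanity check:
 * vmw_cmd_check() already rejects commands that claim a size extending
 * past the end of the stream.
 */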
2120static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2121                             struct vmw_sw_context *sw_context,
2122                             void *buf,
2123                             uint32_t size)
2124{
2125        int32_t cur_size = size;
2126        int ret;
2127
2128        sw_context->buf_start = buf;
2129
2130        while (cur_size > 0) {
2131                size = cur_size;
2132                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2133                if (unlikely(ret != 0))
2134                        return ret;
2135                buf = (void *)((unsigned long) buf + size);
2136                cur_size -= size;
2137        }
2138
2139        if (unlikely(cur_size != 0)) {
2140                DRM_ERROR("Command verifier out of sync.\n");
2141                return -EINVAL;
2142        }
2143
2144        return 0;
2145}
2146
2147static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2148{
2149        sw_context->cur_reloc = 0;
2150}
2151
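/**
 * vmw_apply_relocations - Patch buffer placements into the command stream
 *
 * @sw_context: The software context being used for this batch.
 *
 * For each recorded relocation, rewrites the guest pointer or MOB id in
 * the command stream with the final placement of the now-validated
 * buffer object: a VRAM offset relative to the framebuffer GMR, a GMR
 * id, or a MOB id.
 */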
2152static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2153{
2154        uint32_t i;
2155        struct vmw_relocation *reloc;
2156        struct ttm_validate_buffer *validate;
2157        struct ttm_buffer_object *bo;
2158
2159        for (i = 0; i < sw_context->cur_reloc; ++i) {
2160                reloc = &sw_context->relocs[i];
2161                validate = &sw_context->val_bufs[reloc->index].base;
2162                bo = validate->bo;
2163                switch (bo->mem.mem_type) {
2164                case TTM_PL_VRAM:
2165                        reloc->location->offset += bo->offset;
2166                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2167                        break;
2168                case VMW_PL_GMR:
2169                        reloc->location->gmrId = bo->mem.start;
2170                        break;
2171                case VMW_PL_MOB:
2172                        *reloc->mob_loc = bo->mem.start;
2173                        break;
2174                default:
2175                        BUG();
2176                }
2177        }
2178        vmw_free_relocations(sw_context);
2179}
2180
2181/**
2182 * vmw_resource_list_unreference - Free up a resource list and unreference
2183 * all resources referenced by it.
2184 *
2185 * @list: The resource list.
2186 */
2187static void vmw_resource_list_unreference(struct list_head *list)
2188{
2189        struct vmw_resource_val_node *val, *val_next;
2190
2191        /*
2192         * Drop references to resources held during command submission.
2193         */
2194
2195        list_for_each_entry_safe(val, val_next, list, head) {
2196                list_del_init(&val->head);
2197                vmw_resource_unreference(&val->res);
2198                if (unlikely(val->staged_bindings))
2199                        kfree(val->staged_bindings);
2200                kfree(val);
2201        }
2202}
2203
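/**
 * vmw_clear_validations - Tear down the validation lists
 *
 * @sw_context: The software context being used for this batch.
 *
 * Drops the buffer-object references taken for validation and removes
 * both buffer and resource entries from the software context's hash
 * table. The resource references themselves are dropped later by
 * vmw_resource_list_unreference().
 */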
2204static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2205{
2206        struct vmw_validate_buffer *entry, *next;
2207        struct vmw_resource_val_node *val;
2208
2209        /*
2210         * Drop references to DMA buffers held during command submission.
2211         */
2212        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2213                                 base.head) {
2214                list_del(&entry->base.head);
2215                ttm_bo_unref(&entry->base.bo);
2216                (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2217                sw_context->cur_val_buf--;
2218        }
2219        BUG_ON(sw_context->cur_val_buf != 0);
2220
2221        list_for_each_entry(val, &sw_context->resource_list, head)
2222                (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2223}
2224
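/**
 * vmw_validate_single_buffer - Validate the placement of one buffer object
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers are left where they are. MOBs are validated against
 * the MOB placement; everything else is first tried as VRAM-or-GMR
 * and, failing that, forced into VRAM with eviction.
 */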
2225static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2226                                      struct ttm_buffer_object *bo,
2227                                      bool validate_as_mob)
2228{
2229        int ret;
2230
2232        /*
2233         * Don't validate pinned buffers.
2234         */
2235
2236        if (bo == dev_priv->pinned_bo ||
2237            (bo == dev_priv->dummy_query_bo &&
2238             dev_priv->dummy_query_bo_pinned))
2239                return 0;
2240
2241        if (validate_as_mob)
2242                return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2243
2244        /*
2245         * Put BO in VRAM if there is space, otherwise as a GMR.
2246         * If there is no space in VRAM and GMR ids are all used up,
2247         * start evicting GMRs to make room. If the DMA buffer can't be
2248         * used as a GMR, this will return -ENOMEM.
2249         */
2250
2251        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2252        if (likely(ret == 0 || ret == -ERESTARTSYS))
2253                return ret;
2254
2255        /*
2256         * If that failed, try VRAM again, this time evicting
2257         * previous contents.
2258         */
2259
2260        DRM_INFO("Falling through to VRAM.\n");
2261        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2262        return ret;
2263}
2264
2265static int vmw_validate_buffers(struct vmw_private *dev_priv,
2266                                struct vmw_sw_context *sw_context)
2267{
2268        struct vmw_validate_buffer *entry;
2269        int ret;
2270
2271        list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2272                ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2273                                                 entry->validate_as_mob);
2274                if (unlikely(ret != 0))
2275                        return ret;
2276        }
2277        return 0;
2278}
2279
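/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context being used for this batch.
 * @size: Minimum required size in bytes.
 *
 * Grows the bounce buffer geometrically: each step adds half of the
 * current size and page-aligns the result, repeating until @size fits,
 * and then reallocates the buffer. The old contents are not preserved,
 * which is fine since the buffer is refilled by copy_from_user()
 * afterwards.
 */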
2280static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2281                                 uint32_t size)
2282{
2283        if (likely(sw_context->cmd_bounce_size >= size))
2284                return 0;
2285
2286        if (sw_context->cmd_bounce_size == 0)
2287                sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2288
2289        while (sw_context->cmd_bounce_size < size) {
2290                sw_context->cmd_bounce_size =
2291                        PAGE_ALIGN(sw_context->cmd_bounce_size +
2292                                   (sw_context->cmd_bounce_size >> 1));
2293        }
2294
2295        if (sw_context->cmd_bounce != NULL)
2296                vfree(sw_context->cmd_bounce);
2297
2298        sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2299
2300        if (sw_context->cmd_bounce == NULL) {
2301                DRM_ERROR("Failed to allocate command bounce buffer.\n");
2302                sw_context->cmd_bounce_size = 0;
2303                return -ENOMEM;
2304        }
2305
2306        return 0;
2307}
2308
2309/**
2310 * vmw_execbuf_fence_commands - create and submit a command stream fence
2311 *
2312 * Creates a fence object and submits a command stream marker.
2313 * If this fails for some reason, we sync the fifo and set *p_fence to
2314 * NULL. It is then safe to fence buffers with a NULL pointer.
2315 *
2316 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2317 * user-space fence handle is created; otherwise no handle is created.
2318 */
2319
2320int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2321                               struct vmw_private *dev_priv,
2322                               struct vmw_fence_obj **p_fence,
2323                               uint32_t *p_handle)
2324{
2325        uint32_t sequence;
2326        int ret;
2327        bool synced = false;
2328
2329        /* p_handle implies file_priv. */
2330        BUG_ON(p_handle != NULL && file_priv == NULL);
2331
2332        ret = vmw_fifo_send_fence(dev_priv, &sequence);
2333        if (unlikely(ret != 0)) {
2334                DRM_ERROR("Fence submission error. Syncing.\n");
2335                synced = true;
2336        }
2337
2338        if (p_handle != NULL)
2339                ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2340                                            sequence, p_fence, p_handle);
2341        else
2342                ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2343
2344        if (unlikely(ret != 0 && !synced)) {
2345                (void) vmw_fallback_wait(dev_priv, false, false,
2346                                         sequence, false,
2347                                         VMW_FENCE_WAIT_TIMEOUT);
2348                *p_fence = NULL;
2349        }
2350
2351        return 0;
2352}
2353
2354/**
2355 * vmw_execbuf_copy_fence_user - copy fence object information to
2356 * user-space.
2357 *
2358 * @dev_priv: Pointer to a vmw_private struct.
2359 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2360 * @ret: Return value from fence object creation.
2361 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2362 * which the information should be copied.
2363 * @fence: Pointer to the fence object.
2364 * @fence_handle: User-space fence handle.
2365 *
2366 * This function copies fence information to user-space. If copying fails,
2367 * the user-space struct drm_vmw_fence_rep::error member is left
2368 * untouched; if user-space has preloaded it with -EFAULT, the error
2369 * will then be detected.
2370 * Also, if copying fails, user-space will be unable to signal the fence
2371 * object, so we wait for it immediately and then unreference the
2372 * user-space reference.
2373 */
2374void
2375vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2376                            struct vmw_fpriv *vmw_fp,
2377                            int ret,
2378                            struct drm_vmw_fence_rep __user *user_fence_rep,
2379                            struct vmw_fence_obj *fence,
2380                            uint32_t fence_handle)
2381{
2382        struct drm_vmw_fence_rep fence_rep;
2383
2384        if (user_fence_rep == NULL)
2385                return;
2386
2387        memset(&fence_rep, 0, sizeof(fence_rep));
2388
2389        fence_rep.error = ret;
2390        if (ret == 0) {
2391                BUG_ON(fence == NULL);
2392
2393                fence_rep.handle = fence_handle;
2394                fence_rep.seqno = fence->base.seqno;
2395                vmw_update_seqno(dev_priv, &dev_priv->fifo);
2396                fence_rep.passed_seqno = dev_priv->last_read_seqno;
2397        }
2398
2399        /*
2400         * copy_to_user errors will be detected by user space not
2401         * seeing fence_rep::error filled in. Typically
2402         * user-space would have pre-set that member to -EFAULT.
2403         */
2404        ret = copy_to_user(user_fence_rep, &fence_rep,
2405                           sizeof(fence_rep));
2406
2407        /*
2408         * User-space lost the fence object. We need to sync
2409         * and unreference the handle.
2410         */
2411        if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2412                ttm_ref_object_base_unref(vmw_fp->tfile,
2413                                          fence_handle, TTM_REF_USAGE);
2414                DRM_ERROR("Fence copy error. Syncing.\n");
2415                (void) vmw_fence_obj_wait(fence, false, false,
2416                                          VMW_FENCE_WAIT_TIMEOUT);
2417        }
2418}
2421
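/**
 * vmw_execbuf_process - Validate and submit a command stream
 *
 * @file_priv: Pointer to the calling file. May be NULL for in-kernel
 * submissions that don't need a user-space fence handle.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, or NULL if
 * @kernel_commands is used instead.
 * @kernel_commands: Kernel pointer to the command stream, or NULL to
 * copy @user_commands through the bounce buffer.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle the caller until the fifo lag
 * drops below this many microseconds.
 * @user_fence_rep: Optional user-space address of a struct
 * drm_vmw_fence_rep to receive fence information.
 * @out_fence: Optional location in which to hand out a reference to
 * the fence object created for this submission.
 *
 * Performs the full execbuf sequence: command verification, resource
 * and buffer reservation and validation, fifo submission, fencing and
 * cleanup.
 */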
2422int vmw_execbuf_process(struct drm_file *file_priv,
2423                        struct vmw_private *dev_priv,
2424                        void __user *user_commands,
2425                        void *kernel_commands,
2426                        uint32_t command_size,
2427                        uint64_t throttle_us,
2428                        struct drm_vmw_fence_rep __user *user_fence_rep,
2429                        struct vmw_fence_obj **out_fence)
2430{
2431        struct vmw_sw_context *sw_context = &dev_priv->ctx;
2432        struct vmw_fence_obj *fence = NULL;
2433        struct vmw_resource *error_resource;
2434        struct list_head resource_list;
2435        struct ww_acquire_ctx ticket;
2436        uint32_t handle;
2437        void *cmd;
2438        int ret;
2439
2440        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2441        if (unlikely(ret != 0))
2442                return -ERESTARTSYS;
2443
2444        if (kernel_commands == NULL) {
2445                sw_context->kernel = false;
2446
2447                ret = vmw_resize_cmd_bounce(sw_context, command_size);
2448                if (unlikely(ret != 0))
2449                        goto out_unlock;
2450
2452                ret = copy_from_user(sw_context->cmd_bounce,
2453                                     user_commands, command_size);
2454
2455                if (unlikely(ret != 0)) {
2456                        ret = -EFAULT;
2457                        DRM_ERROR("Failed copying commands.\n");
2458                        goto out_unlock;
2459                }
2460                kernel_commands = sw_context->cmd_bounce;
2461        } else
2462                sw_context->kernel = true;
2463
2464        sw_context->fp = vmw_fpriv(file_priv);
2465        sw_context->cur_reloc = 0;
2466        sw_context->cur_val_buf = 0;
2467        INIT_LIST_HEAD(&sw_context->resource_list);
2468        sw_context->cur_query_bo = dev_priv->pinned_bo;
2469        sw_context->last_query_ctx = NULL;
2470        sw_context->needs_post_query_barrier = false;
2471        memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2472        INIT_LIST_HEAD(&sw_context->validate_nodes);
2473        INIT_LIST_HEAD(&sw_context->res_relocations);
2474        if (!sw_context->res_ht_initialized) {
2475                ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2476                if (unlikely(ret != 0))
2477                        goto out_unlock;
2478                sw_context->res_ht_initialized = true;
2479        }
2480        INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2481
2482        INIT_LIST_HEAD(&resource_list);
2483        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2484                                command_size);
2485        if (unlikely(ret != 0))
2486                goto out_err_nores;
2487
2488        ret = vmw_resources_reserve(sw_context);
2489        if (unlikely(ret != 0))
2490                goto out_err_nores;
2491
2492        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2493                                     true, NULL);
2494        if (unlikely(ret != 0))
2495                goto out_err;
2496
2497        ret = vmw_validate_buffers(dev_priv, sw_context);
2498        if (unlikely(ret != 0))
2499                goto out_err;
2500
2501        ret = vmw_resources_validate(sw_context);
2502        if (unlikely(ret != 0))
2503                goto out_err;
2504
2505        if (throttle_us) {
2506                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2507                                   throttle_us);
2508
2509                if (unlikely(ret != 0))
2510                        goto out_err;
2511        }
2512
2513        ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2514        if (unlikely(ret != 0)) {
2515                ret = -ERESTARTSYS;
2516                goto out_err;
2517        }
2518
2519        if (dev_priv->has_mob) {
2520                ret = vmw_rebind_contexts(sw_context);
2521                if (unlikely(ret != 0))
2522                        goto out_unlock_binding;
2523        }
2524
2525        cmd = vmw_fifo_reserve(dev_priv, command_size);
2526        if (unlikely(cmd == NULL)) {
2527                DRM_ERROR("Failed reserving fifo space for commands.\n");
2528                ret = -ENOMEM;
2529                goto out_unlock_binding;
2530        }
2531
2532        vmw_apply_relocations(sw_context);
2533        memcpy(cmd, kernel_commands, command_size);
2534
2535        vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2536        vmw_resource_relocations_free(&sw_context->res_relocations);
2537
2538        vmw_fifo_commit(dev_priv, command_size);
2539
2540        vmw_query_bo_switch_commit(dev_priv, sw_context);
2541        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2542                                         &fence,
2543                                         (user_fence_rep) ? &handle : NULL);
2544        /*
2545         * This error is harmless, because if fence submission fails,
2546         * vmw_fifo_send_fence will sync. The error will be propagated to
2547         * user-space in @user_fence_rep.
2548         */
2549
2550        if (ret != 0)
2551                DRM_ERROR("Fence submission error. Syncing.\n");
2552
2553        vmw_resource_list_unreserve(&sw_context->resource_list, false);
2554        mutex_unlock(&dev_priv->binding_mutex);
2555
2556        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2557                                    (void *) fence);
2558
2559        if (unlikely(dev_priv->pinned_bo != NULL &&
2560                     !dev_priv->query_cid_valid))
2561                __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2562
2563        vmw_clear_validations(sw_context);
2564        vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2565                                    user_fence_rep, fence, handle);
2566
2567        /* Don't unreference when handing fence out */
2568        if (unlikely(out_fence != NULL)) {
2569                *out_fence = fence;
2570                fence = NULL;
2571        } else if (likely(fence != NULL)) {
2572                vmw_fence_obj_unreference(&fence);
2573        }
2574
2575        list_splice_init(&sw_context->resource_list, &resource_list);
2576        vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2577        mutex_unlock(&dev_priv->cmdbuf_mutex);
2578
2579        /*
2580         * Unreference resources outside of the cmdbuf_mutex to
2581         * avoid deadlocks in resource destruction paths.
2582         */
2583        vmw_resource_list_unreference(&resource_list);
2584
2585        return 0;
2586
2587out_unlock_binding:
2588        mutex_unlock(&dev_priv->binding_mutex);
2589out_err:
2590        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2591out_err_nores:
2592        vmw_resource_list_unreserve(&sw_context->resource_list, true);
2593        vmw_resource_relocations_free(&sw_context->res_relocations);
2594        vmw_free_relocations(sw_context);
2595        vmw_clear_validations(sw_context);
2596        if (unlikely(dev_priv->pinned_bo != NULL &&
2597                     !dev_priv->query_cid_valid))
2598                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2599out_unlock:
2600        list_splice_init(&sw_context->resource_list, &resource_list);
2601        error_resource = sw_context->error_resource;
2602        sw_context->error_resource = NULL;
2603        vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2604        mutex_unlock(&dev_priv->cmdbuf_mutex);
2605
2606        /*
2607         * Unreference resources outside of the cmdbuf_mutex to
2608         * avoid deadlocks in resource destruction paths.
2609         */
2610        vmw_resource_list_unreference(&resource_list);
2611        if (unlikely(error_resource != NULL))
2612                vmw_resource_unreference(&error_resource);
2613
2614        return ret;
2615}
2616
2617/**
2618 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2619 *
2620 * @dev_priv: The device private structure.
2621 *
2622 * This function is called to idle the fifo and unpin the query buffer
2623 * if the normal way to do this hits an error, which should typically be
2624 * extremely rare.
2625 */
2626static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2627{
2628        DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2629
2630        (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2631        vmw_bo_pin(dev_priv->pinned_bo, false);
2632        vmw_bo_pin(dev_priv->dummy_query_bo, false);
2633        dev_priv->dummy_query_bo_pinned = false;
2634}
2635
2637/**
2638 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2639 * query bo.
2640 *
2641 * @dev_priv: The device private structure.
2642 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2643 * _after_ a query barrier that flushes all queries touching the current
2644 * buffer pointed to by @dev_priv->pinned_bo
2645 *
2646 * This function should be used to unpin the pinned query bo, or
2647 * as a query barrier when we need to make sure that all queries have
2648 * finished before the next fifo command. (For example on hardware
2649 * context destructions where the hardware may otherwise leak unfinished
2650 * queries).
2651 *
2652 * This function does not return any failure codes, but makes an
2653 * attempt to unpin safely in case of errors.
2654 *
2655 * The function will synchronize on the previous query barrier, and will
2656 * thus not finish until that barrier has executed.
2657 *
2658 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2659 * before calling this function.
2660 */
2661void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2662                                     struct vmw_fence_obj *fence)
2663{
2664        int ret = 0;
2665        struct list_head validate_list;
2666        struct ttm_validate_buffer pinned_val, query_val;
2667        struct vmw_fence_obj *lfence = NULL;
2668        struct ww_acquire_ctx ticket;
2669
2670        if (dev_priv->pinned_bo == NULL)
2671                goto out_unlock;
2672
2673        INIT_LIST_HEAD(&validate_list);
2674
2675        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2676        pinned_val.shared = false;
2677        list_add_tail(&pinned_val.head, &validate_list);
2678
2679        query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2680        query_val.shared = false;
2681        list_add_tail(&query_val.head, &validate_list);
2682
2683        ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
2684                                     false, NULL);
2685        if (unlikely(ret != 0)) {
2686                vmw_execbuf_unpin_panic(dev_priv);
2687                goto out_no_reserve;
2688        }
2689
2690        if (dev_priv->query_cid_valid) {
2691                BUG_ON(fence != NULL);
2692                ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2693                if (unlikely(ret != 0)) {
2694                        vmw_execbuf_unpin_panic(dev_priv);
2695                        goto out_no_emit;
2696                }
2697                dev_priv->query_cid_valid = false;
2698        }
2699
2700        vmw_bo_pin(dev_priv->pinned_bo, false);
2701        vmw_bo_pin(dev_priv->dummy_query_bo, false);
2702        dev_priv->dummy_query_bo_pinned = false;
2703
2704        if (fence == NULL) {
2705                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2706                                                  NULL);
2707                fence = lfence;
2708        }
2709        ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2710        if (lfence != NULL)
2711                vmw_fence_obj_unreference(&lfence);
2712
2713        ttm_bo_unref(&query_val.bo);
2714        ttm_bo_unref(&pinned_val.bo);
2715        ttm_bo_unref(&dev_priv->pinned_bo);
2716
2717out_unlock:
2718        return;
2719
2720out_no_emit:
2721        ttm_eu_backoff_reservation(&ticket, &validate_list);
2722out_no_reserve:
2723        ttm_bo_unref(&query_val.bo);
2724        ttm_bo_unref(&pinned_val.bo);
2725        ttm_bo_unref(&dev_priv->pinned_bo);
2726}
2727
2728/**
2729 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2730 * query bo.
2731 *
2732 * @dev_priv: The device private structure.
2733 *
2734 * This function should be used to unpin the pinned query bo, or
2735 * as a query barrier when we need to make sure that all queries have
2736 * finished before the next fifo command. (For example on hardware
2737 * context destructions where the hardware may otherwise leak unfinished
2738 * queries).
2739 *
2740 * This function does not return any failure codes, but makes an
2741 * attempt to unpin safely in case of errors.
2742 *
2743 * The function will synchronize on the previous query barrier, and will
2744 * thus not finish until that barrier has executed.
2745 */
2746void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2747{
2748        mutex_lock(&dev_priv->cmdbuf_mutex);
2749        if (dev_priv->query_cid_valid)
2750                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2751        mutex_unlock(&dev_priv->cmdbuf_mutex);
2752}
2753
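/**
 * vmw_execbuf_ioctl - DRM_VMW_EXECBUF ioctl entry point
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the calling file.
 *
 * Checks the argument version, takes the read lock on the reservation
 * semaphore and hands the user-space command stream to
 * vmw_execbuf_process().
 */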
2755int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2756                      struct drm_file *file_priv)
2757{
2758        struct vmw_private *dev_priv = vmw_priv(dev);
2759        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2760        int ret;
2761
2762        /*
2763         * This will allow us to extend the ioctl argument while
2764         * maintaining backwards compatibility:
2765         * We take different code paths depending on the value of
2766         * arg->version.
2767         */
2768
2769        if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2770                DRM_ERROR("Incorrect execbuf version.\n");
2771                DRM_ERROR("You're running outdated experimental "
2772                          "vmwgfx user-space drivers.\n");
2773                return -EINVAL;
2774        }
2775
2776        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2777        if (unlikely(ret != 0))
2778                return ret;
2779
2780        ret = vmw_execbuf_process(file_priv, dev_priv,
2781                                  (void __user *)(unsigned long)arg->commands,
2782                                  NULL, arg->command_size, arg->throttle_us,
2783                                  (void __user *)(unsigned long)arg->fence_rep,
2784                                  NULL);
2785        ttm_read_unlock(&dev_priv->reservation_sem);
2786        if (unlikely(ret != 0))
2787                return ret;
2788
2789        vmw_kms_cursor_post_execbuf(dev_priv);
2790
2791        return 0;
2792}
2793