linux/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
        struct vmw_resource res;
        struct vmw_resource *ctx;
        size_t size_read_back;
        int seen_entries;
        u32 type;
        bool scrubbed;
        struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Unbind call-back function.
 */
struct vmw_cotable_info {
        u32 min_initial_entries;
        u32 size;
        void (*unbind_func)(struct vmw_private *, struct list_head *,
                            bool);
};

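/*
 * Per-type static information, indexed by the SVGA_COTABLE_* type value,
 * so the entry order has to match the SVGACOTableType enumeration. Types
 * whose entries carry bindings provide an unbind_func callback so the
 * corresponding resources can be scrubbed before the table goes away.
 */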
static const struct vmw_cotable_info co_info[] = {
        {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
        {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
        {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
        {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
        {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
        {1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
        {1, sizeof(SVGACOTableDXQueryEntry), NULL},
        {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
        {1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise, the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
        SVGA_COTABLE_RTVIEW,
        SVGA_COTABLE_DSVIEW,
        SVGA_COTABLE_SRVIEW,
        SVGA_COTABLE_DXSHADER,
        SVGA_COTABLE_ELEMENTLAYOUT,
        SVGA_COTABLE_BLENDSTATE,
        SVGA_COTABLE_DEPTHSTENCIL,
        SVGA_COTABLE_RASTERIZERSTATE,
        SVGA_COTABLE_SAMPLER,
        SVGA_COTABLE_STREAMOUTPUT,
        SVGA_COTABLE_DXQUERY,
        SVGA_COTABLE_UAVIEW,
};

static int vmw_cotable_bind(struct vmw_resource *res,
                            struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
                              bool readback,
                              struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
        .res_type = vmw_res_cotable,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "context guest backed object tables",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_cotable_create,
        .destroy = vmw_cotable_destroy,
        .bind = vmw_cotable_bind,
        .unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
        return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
        res->id = -1;
        return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = &res->backup->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCOTable body;
        } *cmd;

        WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
        dma_resv_assert_held(bo->base.resv);

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;

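        /*
         * A single SET_COTABLE command pointing at the backup MOB and
         * carrying the previously read-back size is enough to reattach
         * the table to the context on the device side.
         */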
        WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
        WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
        cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = vcotbl->ctx->id;
        cmd->body.type = vcotbl->type;
        cmd->body.mobid = bo->resource->start;
        cmd->body.validSizeInBytes = vcotbl->size_read_back;

        vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
        vcotbl->scrubbed = false;

        return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
                            struct ttm_validate_buffer *val_buf)
{
        /*
         * The create() callback may have changed @res->backup without
         * the caller noticing, and with val_buf->bo still pointing to
         * the old backup buffer. Although hackish, and not used currently,
         * take the opportunity to correct the value here so that it's not
         * misused in the future.
         */
        val_buf->bo = &res->backup->base;

        return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the backup
 * buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions:
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        size_t submit_size;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackCOTable body;
        } *cmd0;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCOTable body;
        } *cmd1;

        if (vcotbl->scrubbed)
                return 0;

        if (co_info[vcotbl->type].unbind_func)
                co_info[vcotbl->type].unbind_func(dev_priv,
                                                  &vcotbl->resource_list,
                                                  readback);
        submit_size = sizeof(*cmd1);
        if (readback)
                submit_size += sizeof(*cmd0);

        cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (!cmd1)
                return -ENOMEM;

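        /*
         * When a readback is requested, emit READBACK_COTABLE first so the
         * current table contents land in the backup MOB, then detach the
         * table by setting its MOB id to SVGA3D_INVALID_ID.
         */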
        vcotbl->size_read_back = 0;
        if (readback) {
                cmd0 = (void *) cmd1;
                cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
                cmd0->header.size = sizeof(cmd0->body);
                cmd0->body.cid = vcotbl->ctx->id;
                cmd0->body.type = vcotbl->type;
                cmd1 = (void *) &cmd0[1];
                vcotbl->size_read_back = res->backup_size;
        }
        cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.cid = vcotbl->ctx->id;
        cmd1->body.type = vcotbl->type;
        cmd1->body.mobid = SVGA3D_INVALID_ID;
        cmd1->body.validSizeInBytes = 0;
        vmw_cmd_commit_flush(dev_priv, submit_size);
        vcotbl->scrubbed = true;

        /* Trigger a create() on next validate. */
        res->id = -1;

        return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
                              bool readback,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        if (!vmw_resource_mob_attached(res))
                return 0;

        WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
        dma_resv_assert_held(bo->base.resv);

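        /*
         * Scrubbing is done for all cotables of the owning context under
         * the binding mutex; afterwards the backup buffer is fenced so it
         * is not moved or touched by the CPU before the device is done
         * with it.
         */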
        mutex_lock(&dev_priv->binding_mutex);
        if (!vcotbl->scrubbed)
                vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
        mutex_unlock(&dev_priv->binding_mutex);
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        vmw_bo_fence_single(bo, fence);
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackCOTable body;
        } *cmd;
        struct vmw_fence_obj *fence;

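        /*
         * If the cotable is already scrubbed, the device no longer holds
         * contents newer than the backup MOB, so only fencing is needed.
         */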
        if (!vcotbl->scrubbed) {
                cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
                if (!cmd)
                        return -ENOMEM;

                cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.cid = vcotbl->ctx->id;
                cmd->body.type = vcotbl->type;
                vcotbl->size_read_back = res->backup_size;
                vmw_cmd_commit(dev_priv, sizeof(*cmd));
        }

        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        vmw_bo_fence_single(&res->backup->base, fence);
        vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_buffer_object *buf, *old_buf = res->backup;
        struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
        size_t old_size = res->backup_size;
        size_t old_size_read_back = vcotbl->size_read_back;
        size_t cur_size_read_back;
        struct ttm_bo_kmap_obj old_map, new_map;
        int ret;
        size_t i;

        ret = vmw_cotable_readback(res);
        if (ret)
                return ret;

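        /*
         * vmw_cotable_readback() may have updated size_read_back. Stash
         * the new value and restore the old one for now, so a resize that
         * is backed out before the new MOB is committed leaves the cotable
         * state unchanged.
         */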
        cur_size_read_back = vcotbl->size_read_back;
        vcotbl->size_read_back = old_size_read_back;

        /*
         * While the device is processing, allocate and reserve a buffer
         * object for the new COTable. Initially pin the buffer object to
         * make sure we can use tryreserve without failure.
         */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
                          true, true, vmw_bo_bo_free);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                return ret;
        }

        bo = &buf->base;
        WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

        ret = ttm_bo_wait(old_bo, false, false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed waiting for cotable unbind.\n");
                goto out_wait;
        }

        /*
         * Do a page by page copy of COTables. This eliminates slow vmap()s.
         * This should really be a TTM utility.
         */
        for (i = 0; i < old_bo->resource->num_pages; ++i) {
                bool dummy;

                ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed mapping old COTable on resize.\n");
                        goto out_wait;
                }
                ret = ttm_bo_kmap(bo, i, 1, &new_map);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed mapping new COTable on resize.\n");
                        goto out_map_new;
                }
                memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
                       ttm_kmap_obj_virtual(&old_map, &dummy),
                       PAGE_SIZE);
                ttm_bo_kunmap(&new_map);
                ttm_bo_kunmap(&old_map);
        }

        /* Unpin new buffer, and switch backup buffers. */
        ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed validating new COTable backup buffer.\n");
                goto out_wait;
        }

        vmw_resource_mob_detach(res);
        res->backup = buf;
        res->backup_size = new_size;
        vcotbl->size_read_back = cur_size_read_back;

        /*
         * Now tell the device to switch. If this fails, then we need to
         * revert the full resize.
         */
        ret = vmw_cotable_unscrub(res);
        if (ret) {
                DRM_ERROR("Failed switching COTable backup buffer.\n");
                res->backup = old_buf;
                res->backup_size = old_size;
                vcotbl->size_read_back = old_size_read_back;
                vmw_resource_mob_attach(res);
                goto out_wait;
        }

        vmw_resource_mob_attach(res);
        /* Let go of the old mob. */
        vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;

        /* Release the pin acquired in vmw_bo_init */
        ttm_bo_unpin(bo);

        return 0;

out_map_new:
        ttm_bo_kunmap(&old_map);
out_wait:
        ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
        vmw_bo_unreference(&buf);

        return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for two
 * things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        size_t new_size = res->backup_size;
        size_t needed_size;
        int ret;

        /* Check whether we need to resize the cotable */
        needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
        while (needed_size > new_size)
                new_size *= 2;
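        /*
         * Example with made-up numbers: for an entry size of 64 bytes and
         * seen_entries == 70, needed_size is 71 * 64 = 4544 bytes, so a
         * one-page (4096-byte) cotable doubles once to 8192 bytes.
         */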

        if (likely(new_size <= res->backup_size)) {
                if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
                        ret = vmw_cotable_unscrub(res);
                        if (ret)
                                return ret;
                }
                res->id = vcotbl->type;
                return 0;
        }

        return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
        (void) vmw_cotable_destroy(res);
}

static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(res);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                                       struct vmw_resource *ctx,
                                       u32 type)
{
        struct vmw_cotable *vcotbl;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;
        u32 num_entries;

        if (unlikely(cotable_acc_size == 0))
                cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   cotable_acc_size, &ttm_opt_ctx);
        if (unlikely(ret))
                return ERR_PTR(ret);

        vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
        if (unlikely(!vcotbl)) {
                ret = -ENOMEM;
                goto out_no_alloc;
        }

        ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
                                vmw_cotable_free, &vmw_cotable_func);
        if (unlikely(ret != 0))
                goto out_no_init;

        INIT_LIST_HEAD(&vcotbl->resource_list);
        vcotbl->res.id = type;
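        /*
         * Start with a single page of backing; only if that cannot hold
         * the minimum number of initial entries for this type is the
         * backup size rounded up to whole pages covering that minimum.
         */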
        vcotbl->res.backup_size = PAGE_SIZE;
        num_entries = PAGE_SIZE / co_info[type].size;
        if (num_entries < co_info[type].min_initial_entries) {
                vcotbl->res.backup_size = co_info[type].min_initial_entries *
                        co_info[type].size;
                vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
        }

        vcotbl->scrubbed = true;
        vcotbl->seen_entries = -1;
        vcotbl->type = type;
        vcotbl->ctx = ctx;

        vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

        return &vcotbl->res;

out_no_init:
        kfree(vcotbl);
out_no_alloc:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
        return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);

        if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
                DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
                          (unsigned) vcotbl->type, id);
                return -EINVAL;
        }

        if (vcotbl->seen_entries < id) {
                /* Trigger a call to create() on next validate */
                res->id = -1;
                vcotbl->seen_entries = id;
        }

        return 0;
}

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of active
 * resources.
 *
 * @res: pointer to struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
        struct vmw_cotable *vcotbl =
                container_of(res, struct vmw_cotable, res);

        list_add_tail(head, &vcotbl->resource_list);
}