linux/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context object tables (COTables) as resources to make use of the
 * resource backing MOB eviction mechanism, which is used to read back the
 * COTable whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Highest entry id seen in the command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
        struct vmw_resource res;
        struct vmw_resource *ctx;
        size_t size_read_back;
        int seen_entries;
        u32 type;
        bool scrubbed;
        struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Minimum number of initial entries at cotable
 * allocation for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: If non-NULL, called when the cotable is scrubbed, to scrub or
 * destroy the resources tracked on the cotable's resource list.
 */
struct vmw_cotable_info {
        u32 min_initial_entries;
        u32 size;
        void (*unbind_func)(struct vmw_private *, struct list_head *,
                            bool);
};

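/*
 * Per-cotable-type static data, indexed by SVGA_COTABLE_* type: minimum
 * number of initial entries, entry size and an optional scrub callback.
 */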
static const struct vmw_cotable_info co_info[] = {
        {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
        {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
        {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
        {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
        {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
        {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
        {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
        {1, sizeof(SVGACOTableDXQueryEntry), NULL},
        {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
};

/*
 * Cotables with bindings that we remove must be scrubbed first,
 * otherwise, the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
        SVGA_COTABLE_RTVIEW,
        SVGA_COTABLE_DSVIEW,
        SVGA_COTABLE_SRVIEW,
        SVGA_COTABLE_DXSHADER,
        SVGA_COTABLE_ELEMENTLAYOUT,
        SVGA_COTABLE_BLENDSTATE,
        SVGA_COTABLE_DEPTHSTENCIL,
        SVGA_COTABLE_RASTERIZERSTATE,
        SVGA_COTABLE_SAMPLER,
        SVGA_COTABLE_STREAMOUTPUT,
        SVGA_COTABLE_DXQUERY,
};

static int vmw_cotable_bind(struct vmw_resource *res,
                            struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
                              bool readback,
                              struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
        .res_type = vmw_res_cotable,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "context guest backed object tables",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_cotable_create,
        .destroy = vmw_cotable_destroy,
        .bind = vmw_cotable_bind,
        .unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
        return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
        res->id = -1;
        return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = &res->backup->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCOTable body;
        } *cmd;

        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
        lockdep_assert_held(&bo->resv->lock.base);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;

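        /*
         * Point the device at the backing MOB with SVGA_3D_CMD_DX_SET_COTABLE
         * and tell it how much of the MOB holds valid, previously read back
         * data.
         */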
        WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
        WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
        cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = vcotbl->ctx->id;
        cmd->body.type = vcotbl->type;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validSizeInBytes = vcotbl->size_read_back;

        vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
        vcotbl->scrubbed = false;

        return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
                            struct ttm_validate_buffer *val_buf)
{
        /*
         * The create() callback may have changed @res->backup without
         * the caller noticing, and with val_buf->bo still pointing to
         * the old backup buffer. Although hackish, and not used currently,
         * take the opportunity to correct the value here so that it's not
         * misused in the future.
         */
        val_buf->bo = &res->backup->base;

        return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the backup
 * buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions.
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        size_t submit_size;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackCOTable body;
        } *cmd0;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCOTable body;
        } *cmd1;

        if (vcotbl->scrubbed)
                return 0;

        if (co_info[vcotbl->type].unbind_func)
                co_info[vcotbl->type].unbind_func(dev_priv,
                                                  &vcotbl->resource_list,
                                                  readback);
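        /*
         * Optionally emit SVGA_3D_CMD_DX_READBACK_COTABLE, then detach the
         * MOB with a SVGA_3D_CMD_DX_SET_COTABLE carrying SVGA3D_INVALID_ID.
         */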
        submit_size = sizeof(*cmd1);
        if (readback)
                submit_size += sizeof(*cmd0);

        cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (!cmd1)
                return -ENOMEM;

        vcotbl->size_read_back = 0;
        if (readback) {
                cmd0 = (void *) cmd1;
                cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
                cmd0->header.size = sizeof(cmd0->body);
                cmd0->body.cid = vcotbl->ctx->id;
                cmd0->body.type = vcotbl->type;
                cmd1 = (void *) &cmd0[1];
                vcotbl->size_read_back = res->backup_size;
        }
        cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.cid = vcotbl->ctx->id;
        cmd1->body.type = vcotbl->type;
        cmd1->body.mobid = SVGA3D_INVALID_ID;
        cmd1->body.validSizeInBytes = 0;
        vmw_fifo_commit_flush(dev_priv, submit_size);
        vcotbl->scrubbed = true;

        /* Trigger a create() on next validate. */
        res->id = -1;

        return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
                              bool readback,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        if (list_empty(&res->mob_head))
                return 0;

        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
        lockdep_assert_held(&bo->resv->lock.base);

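        /*
         * Scrub all of the context's cotables (in the required scrub order)
         * before fencing the backup buffer.
         */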
        mutex_lock(&dev_priv->binding_mutex);
        if (!vcotbl->scrubbed)
                vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
        mutex_unlock(&dev_priv->binding_mutex);
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        vmw_bo_fence_single(bo, fence);
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_private *dev_priv = res->dev_priv;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackCOTable body;
        } *cmd;
        struct vmw_fence_obj *fence;

        if (!vcotbl->scrubbed) {
                cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
                if (!cmd)
                        return -ENOMEM;

                cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.cid = vcotbl->ctx->id;
                cmd->body.type = vcotbl->type;
                vcotbl->size_read_back = res->backup_size;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }

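        /* Fence the backing MOB so subsequent CPU access waits for the readback. */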
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        vmw_bo_fence_single(&res->backup->base, fence);
        vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_buffer_object *buf, *old_buf = res->backup;
        struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
        size_t old_size = res->backup_size;
        size_t old_size_read_back = vcotbl->size_read_back;
        size_t cur_size_read_back;
        struct ttm_bo_kmap_obj old_map, new_map;
        int ret;
        size_t i;

        ret = vmw_cotable_readback(res);
        if (ret)
                return ret;

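        /*
         * Remember the size just read back, but keep the old value in place
         * until the switch to the new MOB has actually succeeded.
         */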
        cur_size_read_back = vcotbl->size_read_back;
        vcotbl->size_read_back = old_size_read_back;

        /*
         * While the device is processing, allocate and reserve a buffer object
         * for the new COTable. Initially pin the buffer object to make sure
         * we can use tryreserve without failure.
         */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
                          true, vmw_bo_bo_free);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                return ret;
        }

        bo = &buf->base;
        WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

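        /* Wait for the device to finish using the old MOB before copying from it. */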
        ret = ttm_bo_wait(old_bo, false, false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed waiting for cotable unbind.\n");
                goto out_wait;
        }

        /*
         * Do a page by page copy of COTables. This eliminates slow vmap()s.
         * This should really be a TTM utility.
         */
        for (i = 0; i < old_bo->num_pages; ++i) {
                bool dummy;

                ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed mapping old COTable on resize.\n");
                        goto out_wait;
                }
                ret = ttm_bo_kmap(bo, i, 1, &new_map);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed mapping new COTable on resize.\n");
                        goto out_map_new;
                }
                memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
                       ttm_kmap_obj_virtual(&old_map, &dummy),
                       PAGE_SIZE);
                ttm_bo_kunmap(&new_map);
                ttm_bo_kunmap(&old_map);
        }

        /* Unpin new buffer, and switch backup buffers. */
        ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed validating new COTable backup buffer.\n");
                goto out_wait;
        }

        res->backup = buf;
        res->backup_size = new_size;
        vcotbl->size_read_back = cur_size_read_back;

        /*
         * Now tell the device to switch. If this fails, then we need to
         * revert the full resize.
         */
        ret = vmw_cotable_unscrub(res);
        if (ret) {
                DRM_ERROR("Failed switching COTable backup buffer.\n");
                res->backup = old_buf;
                res->backup_size = old_size;
                vcotbl->size_read_back = old_size_read_back;
                goto out_wait;
        }

        /* Let go of the old mob. */
        list_del(&res->mob_head);
        list_add_tail(&res->mob_head, &buf->res_list);
        vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;

        return 0;

out_map_new:
        ttm_bo_kunmap(&old_map);
out_wait:
        ttm_bo_unreserve(bo);
        vmw_bo_unreference(&buf);

        return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for two
 * things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer, that is, if @res->mob_head is non-empty.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        size_t new_size = res->backup_size;
        size_t needed_size;
        int ret;

        /* Check whether we need to resize the cotable */
        needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
        while (needed_size > new_size)
                new_size *= 2;

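        /*
         * If no growth is needed, just make sure the cotable is bound to its
         * MOB and has a valid id; otherwise switch to a resized MOB.
         */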
        if (likely(new_size <= res->backup_size)) {
                if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
                        ret = vmw_cotable_unscrub(res);
                        if (ret)
                                return ret;
                }
                res->id = vcotbl->type;
                return 0;
        }

        return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
        (void) vmw_cotable_destroy(res);
}

static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(res);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                                       struct vmw_resource *ctx,
                                       u32 type)
{
        struct vmw_cotable *vcotbl;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;
        u32 num_entries;

        if (unlikely(cotable_acc_size == 0))
                cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   cotable_acc_size, &ttm_opt_ctx);
        if (unlikely(ret))
                return ERR_PTR(ret);

        vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
        if (unlikely(!vcotbl)) {
                ret = -ENOMEM;
                goto out_no_alloc;
        }

        ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
                                vmw_cotable_free, &vmw_cotable_func);
        if (unlikely(ret != 0))
                goto out_no_init;

        INIT_LIST_HEAD(&vcotbl->resource_list);
        vcotbl->res.id = type;
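        /*
         * Start with a single page of backing store, or, if a page can't hold
         * the minimum number of initial entries, the page-aligned minimum.
         */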
        vcotbl->res.backup_size = PAGE_SIZE;
        num_entries = PAGE_SIZE / co_info[type].size;
        if (num_entries < co_info[type].min_initial_entries) {
                vcotbl->res.backup_size = co_info[type].min_initial_entries *
                        co_info[type].size;
                vcotbl->res.backup_size =
                        (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        }

        vcotbl->scrubbed = true;
        vcotbl->seen_entries = -1;
        vcotbl->type = type;
        vcotbl->ctx = ctx;

        vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

        return &vcotbl->res;

out_no_init:
        kfree(vcotbl);
out_no_alloc:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
        return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
        struct vmw_cotable *vcotbl = vmw_cotable(res);

        if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
                DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
                          (unsigned) vcotbl->type, id);
                return -EINVAL;
        }

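        /* A new highest id forces a create() and, if needed, a resize. */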
        if (vcotbl->seen_entries < id) {
                /* Trigger a call to create() on next validate */
                res->id = -1;
                vcotbl->seen_entries = id;
        }

        return 0;
}

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of active
 * resources.
 *
 * @res: pointer to struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
        struct vmw_cotable *vcotbl =
                container_of(res, struct vmw_cotable, res);

        list_add_tail(head, &vcotbl->resource_list);
}