linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}
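
/*
 * Illustrative sketch (editor's addition, not part of the driver): the usual
 * take/drop pattern for the refcounting helpers above. The caller context and
 * do_something_with() are hypothetical.
 *
 *      struct vmw_resource *extra = vmw_resource_reference(res);
 *
 *      do_something_with(extra);
 *
 *      vmw_resource_unreference(&extra);
 *
 * vmw_resource_unreference() also clears the caller's pointer, so "extra" is
 * NULL afterwards.
 */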


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}
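
/*
 * Illustrative sketch (editor's addition, assumptions marked): a resource
 * type typically embeds struct vmw_resource in its own structure and calls
 * vmw_resource_init() with a deferred id, its own destructor and function
 * table. The names my_res, my_res_free and my_res_func are hypothetical;
 * the real users include the surface, context and shader implementations.
 *
 *      static int my_res_init(struct vmw_private *dev_priv,
 *                             struct my_res *my)
 *      {
 *              return vmw_resource_init(dev_priv, &my->res, true,
 *                                       my_res_free, &my_res_func);
 *      }
 */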


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
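
/*
 * Illustrative sketch (editor's addition): looking up a surface resource from
 * a user-space handle and dropping the reference when done. The variables
 * dev_priv, tfile and handle are assumed to come from the calling ioctl;
 * user_surface_converter is the converter used elsewhere in this file.
 *
 *      struct vmw_resource *res;
 *      int ret;
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            user_surface_converter, &res);
 *      if (ret)
 *              return ret;
 *
 *      (use res here)
 *
 *      vmw_resource_unreference(&res);
 */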

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks, without taking a
 * reference
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Unlike vmw_user_resource_lookup_handle(), this function does not take a
 * reference on the returned resource. The caller must make sure the resource
 * can't be destroyed while the returned pointer is in use.
 *
 * Return: The resource pointer on success, ERR_PTR(-ESRCH) if the handle
 * can't be found, or ERR_PTR(-EINVAL) if it is of the wrong type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}
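
/*
 * Illustrative sketch (editor's addition): the noref variant returns an
 * ERR_PTR on failure and does not take a reference, so the caller must
 * guarantee that the resource cannot be destroyed while the pointer is in
 * use, and must release the underlying noref base-object lookup when done
 * (presumably via the driver's noref release helper; see vmwgfx_drv.h).
 *
 *      struct vmw_resource *res;
 *
 *      res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
 *                                                  user_surface_converter);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 */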

/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object from
 * a user-space handle.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 * On successful return, exactly one of them is set.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}
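
/*
 * Illustrative sketch (editor's addition): callers pass two NULL-initialized
 * pointers and check which one was filled in. Variable names are
 * illustrative.
 *
 *      struct vmw_surface *surf = NULL;
 *      struct vmw_buffer_object *buf = NULL;
 *      int ret;
 *
 *      ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *      if (ret)
 *              return ret;
 *
 *      if (surf)
 *              (the handle referred to a surface)
 *      else
 *              (the handle referred to a buffer object, now in buf)
 */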

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        ttm_bo_get(&res->backup->base);
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @no_backup:      Whether to skip allocation of a missing backup buffer.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes.\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}
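
/*
 * Illustrative sketch (editor's addition): the reserve / validate /
 * unreserve cycle around command submission, with error handling reduced to
 * a minimum. Reservation of the backup buffer itself is omitted here; see
 * vmw_resource_check_buffer() and the validation code for the full picture.
 *
 *      ret = vmw_resource_reserve(res, true, false);
 *      if (ret)
 *              return ret;
 *
 *      ret = vmw_resource_validate(res, true);
 *      if (ret) {
 *              vmw_resource_unreserve(res, false, NULL, 0);
 *              return ret;
 *      }
 *
 *      (emit commands referencing res->id)
 *
 *      vmw_resource_unreserve(res, false, NULL, 0);
 */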

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}


/**
 * vmw_resource_unbind_list - unbind resources backed by a given buffer object
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{

        struct vmw_resource *res, *next;
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .num_shared = 0
        };

        lockdep_assert_held(&vbo->base.resv->lock.base);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;

                (void) res->func->unbind(res, true, &val_buf);
                res->backup_dirty = true;
                res->res_dirty = false;
                list_del_init(&res->mob_head);
        }

        (void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;


        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time the affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}



/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;


        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);

}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Wait lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to sleep interruptibly if waiting is needed
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}
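
/*
 * Illustrative sketch (editor's addition): pin references are taken around
 * periods during which a resource's device id must stay stable and the
 * resource must not be evicted, e.g. while it is set up for scanout.
 *
 *      ret = vmw_resource_pin(res, true);
 *      if (ret)
 *              return ret;
 *
 *      (res->id stays valid and res cannot be evicted)
 *
 *      vmw_resource_unpin(res);
 */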

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}
