// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

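/*
 * Maximum number of consecutive, non-fatal eviction errors tolerated while
 * trying to free up device resources before giving up.
 */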
#define VMW_RES_EVICT_ERR_COUNT 10

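/**
 * vmw_resource_reference - Take a reference on a resource
 *
 * @res: The resource to reference.
 *
 * Returns @res with its refcount incremented.
 */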
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

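/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless its refcount has already dropped to zero
 *
 * @res: The resource to reference.
 *
 * Returns @res on success, NULL if the resource is already being destroyed.
 */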
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

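        /*
         * Preload the idr with GFP_KERNEL outside the spinlock so that the
         * GFP_NOWAIT allocation below can be satisfied from the preloaded
         * per-cpu pool without sleeping under the lock.
         */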
        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}


/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Like vmw_user_resource_lookup_handle(), but returns the resource without
 * taking a reference on it. Returns ERR_PTR(-ESRCH) if the handle can't be
 * found and ERR_PTR(-EINVAL) if it is associated with an incorrect resource
 * type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}

/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object
 * from a user-space handle.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                          res->func->backup_placement,
                          interruptible,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

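        /*
         * Bind the resource to the supplied buffer if there is one and the
         * resource type has a bind callback: either the resource needs a
         * backup buffer and isn't bound to one yet, or it doesn't track a
         * backup binding at all.
         */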
        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

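        /*
         * If the resource needs a backup but isn't bound to one yet, the
         * buffer holds no resource data, so there is nothing to validate
         * into place here.
         */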
        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps during reservation should be
 *                  performed while interruptible.
 * @no_backup:      Whether to skip allocating a backup buffer even if the
 *                  resource type needs one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes.\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.shared = false;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptibly if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
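        /*
         * Retry validation until it succeeds or fails with something other
         * than -EBUSY. On -EBUSY, evict one resource of the same type from
         * the LRU list and try again, giving up after too many consecutive
         * eviction errors.
         */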
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}


/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backup MOB
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct vmw_resource *res, *next;
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .shared = false
        };

        lockdep_assert_held(&vbo->base.resv->lock.base);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;

                (void) res->func->unbind(res, true, &val_buf);
                res->backup_dirty = true;
                res->res_dirty = false;
                list_del_init(&res->mob_head);
        }

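        /* Wait for the unbind operations queued above to finish. */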
        (void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes that binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time the affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states on a query MOB move
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else {
                mutex_unlock(&dev_priv->binding_mutex);
        }
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Wait-lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed while interruptible
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}