linux/drivers/gpu/drm/ttm/ttm_execbuf_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/* Unlock, in reverse list order, every buffer on @list before @entry. */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		reservation_object_unlock(bo->resv);
	}
}

/* Take every buffer on @list off its LRU list; caller holds lru_lock. */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		ttm_bo_del_from_lru(bo);
	}
}

/*
 * Undo a successful ttm_eu_reserve_buffers(): put every buffer back on
 * its LRU list, unlock it, and finish the ww_acquire ticket, if any.
 */
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

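/*
 * Usage sketch (not part of this file; names prefixed my_ are
 * hypothetical driver code): once reservation has succeeded, any later
 * failure must unwind through ttm_eu_backoff_reservation() so the
 * buffers are unlocked and returned to the LRU before the error is
 * propagated.
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate_buffers(&list);	// hypothetical driver step
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 */
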
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, all reservations
 * taken so far are backed off and -EBUSY is returned to the caller.
 *
 * If a buffer is already reserved by another validation, the validator
 * holding the younger ww_acquire ticket gets -EDEADLK, backs off all of
 * its reservations and sleeps until the contended buffer becomes
 * unreserved. This prevents deadlocks when multiple validators reserve
 * the same buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			reservation_object_unlock(bo->resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv,
								entry->num_shared);
			if (!ret)
				continue;
		}

		/* We lost the race to another validator: drop every
		 * reservation taken so far, (slow-)lock only the contended
		 * buffer, and restart the loop if that succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
								       ticket);
			} else {
				ww_mutex_lock_slow(&bo->resv->lock, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = reservation_object_reserve_shared(bo->resv,
								entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move this item to the front of the list so the loop
		 * carries on over the remaining, still unlocked buffers
		 * without any extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

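/*
 * Typical call sequence (a sketch, not taken from an in-tree driver;
 * names prefixed my_ are hypothetical): build a list of
 * ttm_validate_buffer entries for one submission, then reserve them all
 * under a single ww_acquire ticket before validating placements.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(list);
 *	LIST_HEAD(dups);
 *	struct ttm_validate_buffer val;
 *	int ret;
 *
 *	val.bo = my_bo;		// hypothetical buffer object
 *	val.num_shared = 1;	// reserve one shared fence slot
 *	list_add(&val.head, &list);
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;	// e.g. -ERESTARTSYS or -EBUSY
 */
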
/*
 * Attach @fence to every buffer on @list (in a shared or the exclusive
 * slot, depending on num_shared), put the buffers back on the LRU and
 * unlock them, then finish the ww_acquire ticket, if any.
 */
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->num_shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);

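/*
 * Fencing sketch (hypothetical driver code, not part of this file):
 * after the command stream using the reserved buffers has been queued,
 * publish its fence on every buffer and drop all locks in one call.
 * my_submit_commands() stands in for whatever produces the submission's
 * dma_fence.
 *
 *	fence = my_submit_commands(&list);	// hypothetical
 *	if (IS_ERR(fence)) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return PTR_ERR(fence);
 *	}
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */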