linux/include/drm/ttm/ttm_bo_driver.h
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include <drm/ttm/ttm_device.h>

#include "ttm_bo_api.h"
#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"

/**
 * struct ttm_lru_bulk_move_pos
 *
 * @first: first BO in the bulk move range
 * @last: last BO in the bulk move range
 *
 * Positions for a lru bulk move.
 */
struct ttm_lru_bulk_move_pos {
        struct ttm_buffer_object *first;
        struct ttm_buffer_object *last;
};

/**
 * struct ttm_lru_bulk_move
 *
 * @tt: first/last lru entry for BOs in the TT domain
 * @vram: first/last lru entry for BOs in the VRAM domain
 *
 * Helper structure for bulk moves on the LRU list.
 */
struct ttm_lru_bulk_move {
        struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
        struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
};
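
/*
 * Illustrative sketch, not part of the original header: a driver that
 * keeps the BOs of one process together on the LRU can batch the tail
 * moves, assuming ttm_bo_bulk_move_lru_tail() from ttm_bo_api.h; "bdev",
 * "process_bos" and "my_lru_entry" are hypothetical:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&bdev->lru_lock);
 *	list_for_each_entry(bo, &process_bos, my_lru_entry)
 *		ttm_bo_move_to_lru_tail(bo, bo->resource, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 */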

/*
 * ttm_bo.c
 */

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects. This function may sleep while waiting for space to
 * become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx->no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_resource **mem,
                     struct ttm_operation_ctx *ctx);

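/*
 * Illustrative sketch, not part of the original header: allocating space
 * for a reserved BO with an interruptible context; "placement" here is a
 * caller-provided struct ttm_placement:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_resource *new_mem;
 *	int ret;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &new_mem, &ctx);
 *	if (ret)
 *		return ret;
 */
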
/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
                                 bool interruptible, bool no_wait,
                                 struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        if (no_wait) {
                bool success;

                if (WARN_ON(ticket))
                        return -EBUSY;

                success = dma_resv_trylock(bo->base.resv);
                return success ? 0 : -EBUSY;
        }

        if (interruptible)
                ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
        else
                ret = dma_resv_lock(bo->base.resv, ticket);
        if (ret == -EINTR)
                return -ERESTARTSYS;
        return ret;
}

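/*
 * Illustrative sketch, not part of the original header: a simple
 * reserve/unreserve pair without a ww ticket, using ttm_bo_unreserve()
 * declared further down in this header:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	... validate or inspect the buffer ...
 *	ttm_bo_unreserve(bo);
 */
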
/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          struct ww_acquire_ctx *ticket)
{
        if (interruptible) {
                int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
                                                           ticket);

                if (ret == -EINTR)
                        ret = -ERESTARTSYS;
                return ret;
        }
        dma_resv_lock_slow(bo->base.resv, ticket);
        return 0;
}

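/*
 * Illustrative sketch, not part of the original header: the -EDEADLK
 * backoff when reserving several BOs under one ww ticket; "bo1" and
 * "bo2" are hypothetical:
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	ret = ttm_bo_reserve(bo1, true, false, &ticket);
 *	...
 *	ret = ttm_bo_reserve(bo2, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		ttm_bo_unreserve(bo1);
 *		ret = ttm_bo_reserve_slowpath(bo2, true, &ticket);
 *		if (!ret)
 *			ret = ttm_bo_reserve(bo1, true, false, &ticket);
 *	}
 */
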
/* Move @bo to the LRU tail, taking and dropping the device's LRU lock. */
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
        spin_lock(&bo->bdev->lru_lock);
        ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
        spin_unlock(&bo->bdev->lru_lock);
}

/* Assign @new_mem to a BO that currently has no resource assigned. */
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
                                     struct ttm_resource *new_mem)
{
        WARN_ON(bo->resource);
        bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Free the current resource of @bo and assign the memory from @new_mem
 * to the buffer object @bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
                                    struct ttm_resource *new_mem)
{
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, new_mem);
}

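/*
 * Illustrative sketch, not part of the original header: a driver's move
 * callback can use ttm_bo_move_null() when no data actually has to be
 * copied, e.g. when old and new placement are backed by the same pages;
 * "old_mem" is the hypothetical previous resource:
 *
 *	if (old_mem->mem_type == TTM_PL_SYSTEM &&
 *	    new_mem->mem_type == TTM_PL_SYSTEM) {
 *		ttm_bo_move_null(bo, new_mem);
 *		return 0;
 *	}
 */
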
/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        ttm_bo_move_to_lru_tail_unlocked(bo);
        dma_resv_unlock(bo->base.resv);
}

/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
                       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context, e.g. if and how to sleep.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful, free any old aperture space and
 * assign @new_mem to @bo->resource. If unsuccessful, the old data
 * remains untouched, and it's up to the caller to free the memory space
 * indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem);

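/*
 * Illustrative sketch, not part of the original header: a driver move
 * callback falling back to a CPU copy when it has no copy engine to
 * offload to; "have_copy_engine" is hypothetical:
 *
 *	if (!have_copy_engine)
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 */
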
/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence, bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem);

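/*
 * Illustrative sketch, not part of the original header: after queueing a
 * GPU copy and obtaining a fence for it, a driver hands the rest of the
 * move over to TTM; my_driver_copy() is hypothetical:
 *
 *	fence = my_driver_copy(bo, bo->resource, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 */
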
/**
 * ttm_bo_move_sync_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
                                            struct ttm_resource *new_mem)
{
        int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);

        WARN_ON(ret);
}

/**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting: removal of the BO's backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
                     pgprot_t tmp);

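/*
 * Illustrative sketch, not part of the original header: deriving the page
 * protection for a userspace mapping of @bo from the resource's caching
 * attributes, starting from the VMA's default protection:
 *
 *	prot = ttm_io_prot(bo, bo->resource, vm_get_page_prot(vma->vm_flags));
 */
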
/**
 * ttm_bo_tt_bind
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @mem: A struct ttm_resource to bind the object's tt to.
 *
 * Bind the object's tt to a memory resource.
 */
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);

/**
 * ttm_bo_tt_destroy.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Destroy the tt object backing @bo.
 */
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

void ttm_move_memcpy(struct ttm_buffer_object *bo,
                     u32 num_pages,
                     struct ttm_kmap_iter *dst_iter,
                     struct ttm_kmap_iter *src_iter);

struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
                         struct io_mapping *iomap,
                         struct sg_table *st,
                         resource_size_t start);
#endif