linux/include/drm/ttm/ttm_bo_driver.h
   1/**************************************************************************
   2 *
   3 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27/*
  28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29 */
  30#ifndef _TTM_BO_DRIVER_H_
  31#define _TTM_BO_DRIVER_H_
  32
  33#include <drm/drm_mm.h>
  34#include <drm/drm_global.h>
  35#include <drm/drm_vma_manager.h>
  36#include <linux/workqueue.h>
  37#include <linux/fs.h>
  38#include <linux/spinlock.h>
  39#include <linux/reservation.h>
  40
  41#include "ttm_bo_api.h"
  42#include "ttm_memory.h"
  43#include "ttm_module.h"
  44#include "ttm_placement.h"
  45
  46#define TTM_MAX_BO_PRIORITY     4U
  47
  48struct ttm_backend_func {
  49        /**
  50         * struct ttm_backend_func member bind
  51         *
  52         * @ttm: Pointer to a struct ttm_tt.
  53         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
  54         * memory type and location for binding.
  55         *
  56         * Bind the backend pages into the aperture in the location
  57         * indicated by @bo_mem. This function should be able to handle
  58         * differences between aperture and system page sizes.
  59         */
  60        int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
  61
  62        /**
  63         * struct ttm_backend_func member unbind
  64         *
  65         * @ttm: Pointer to a struct ttm_tt.
  66         *
  67         * Unbind previously bound backend pages. This function should be
  68         * able to handle differences between aperture and system page sizes.
  69         */
  70        int (*unbind) (struct ttm_tt *ttm);
  71
  72        /**
  73         * struct ttm_backend_func member destroy
  74         *
  75         * @ttm: Pointer to a struct ttm_tt.
  76         *
   77         * Destroy the backend. This is called back from ttm_tt_destroy, so don't
   78         * call ttm_tt_destroy from the callback, or you will cause an infinite loop.
  79         */
  80        void (*destroy) (struct ttm_tt *ttm);
  81};
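/*
 * Example (illustrative sketch, not part of the original header): how a
 * driver might wire up a struct ttm_backend_func. All mydrv_* identifiers
 * are hypothetical: struct mydrv_ttm_tt is assumed to be a driver-private
 * wrapper embedding struct ttm_tt as its member "ttm", and mydrv_gart_*
 * stand in for whatever programs the device aperture (GART) in bind() and
 * tears the mapping down again in unbind().
 *
 *	static int mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		return mydrv_gart_bind(ttm->pages, ttm->num_pages, bo_mem->start);
 *	}
 *
 *	static int mydrv_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		mydrv_gart_unbind(ttm->num_pages);
 *		return 0;
 *	}
 *
 *	static void mydrv_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		ttm_tt_fini(ttm);
 *		kfree(container_of(ttm, struct mydrv_ttm_tt, ttm));
 *	}
 *
 *	static struct ttm_backend_func mydrv_backend_func = {
 *		.bind    = mydrv_ttm_bind,
 *		.unbind  = mydrv_ttm_unbind,
 *		.destroy = mydrv_ttm_destroy,
 *	};
 */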
  82
  83#define TTM_PAGE_FLAG_WRITE           (1 << 3)
  84#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
  85#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
  86#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
  87#define TTM_PAGE_FLAG_DMA32           (1 << 7)
  88#define TTM_PAGE_FLAG_SG              (1 << 8)
  89
  90enum ttm_caching_state {
  91        tt_uncached,
  92        tt_wc,
  93        tt_cached
  94};
  95
  96/**
  97 * struct ttm_tt
  98 *
  99 * @bdev: Pointer to a struct ttm_bo_device.
 100 * @func: Pointer to a struct ttm_backend_func that describes
 101 * the backend methods.
 102 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 103 * pointer.
 104 * @pages: Array of pages backing the data.
 105 * @num_pages: Number of pages in the page array.
  106 * @page_flags: TTM_PAGE_FLAG_XX flags describing the pages.
  107 * @sg: Scatter-gather table for SG objects imported via dma-buf.
 108 * @swap_storage: Pointer to shmem struct file for swap storage.
 109 * @caching_state: The current caching state of the pages.
 110 * @state: The current binding state of the pages.
 111 *
 112 * This is a structure holding the pages, caching- and aperture binding
 113 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 114 * memory.
 115 */
 116
 117struct ttm_tt {
 118        struct ttm_bo_device *bdev;
 119        struct ttm_backend_func *func;
 120        struct page *dummy_read_page;
 121        struct page **pages;
 122        uint32_t page_flags;
 123        unsigned long num_pages;
 124        struct sg_table *sg; /* for SG objects via dma-buf */
 125        struct ttm_bo_global *glob;
 126        struct file *swap_storage;
 127        enum ttm_caching_state caching_state;
 128        enum {
 129                tt_bound,
 130                tt_unbound,
 131                tt_unpopulated,
 132        } state;
 133};
 134
 135/**
 136 * struct ttm_dma_tt
 137 *
 138 * @ttm: Base ttm_tt struct.
 139 * @dma_address: The DMA (bus) addresses of the pages
 140 * @pages_list: used by some page allocation backend
 141 *
 142 * This is a structure holding the pages, caching- and aperture binding
 143 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 144 * memory.
 145 */
 146struct ttm_dma_tt {
 147        struct ttm_tt ttm;
 148        dma_addr_t *dma_address;
 149        struct list_head pages_list;
 150};
 151
 152#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0) /* Fixed (on-card) PCI memory */
 153#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1) /* Memory mappable */
 154#define TTM_MEMTYPE_FLAG_CMA           (1 << 3) /* Can't map aperture */
 155
 156struct ttm_mem_type_manager;
 157
 158struct ttm_mem_type_manager_func {
 159        /**
 160         * struct ttm_mem_type_manager member init
 161         *
 162         * @man: Pointer to a memory type manager.
 163         * @p_size: Implementation dependent, but typically the size of the
 164         * range to be managed in pages.
 165         *
 166         * Called to initialize a private range manager. The function is
 167         * expected to initialize the man::priv member.
 168         * Returns 0 on success, negative error code on failure.
 169         */
 170        int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
 171
 172        /**
 173         * struct ttm_mem_type_manager member takedown
 174         *
 175         * @man: Pointer to a memory type manager.
 176         *
 177         * Called to undo the setup done in init. All allocated resources
 178         * should be freed.
 179         */
 180        int  (*takedown)(struct ttm_mem_type_manager *man);
 181
 182        /**
 183         * struct ttm_mem_type_manager member get_node
 184         *
 185         * @man: Pointer to a memory type manager.
 186         * @bo: Pointer to the buffer object we're allocating space for.
  187         * @place: Placement details.
 189         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
 190         *
 191         * This function should allocate space in the memory type managed
 192         * by @man. Placement details if
  193         * applicable are given by @place. If successful,
 194         * @mem::mm_node should be set to a non-null value, and
 195         * @mem::start should be set to a value identifying the beginning
 196         * of the range allocated, and the function should return zero.
  197         * If the memory region can't accommodate the buffer object, @mem::mm_node
 198         * should be set to NULL, and the function should return 0.
 199         * If a system error occurred, preventing the request to be fulfilled,
 200         * the function should return a negative error code.
 201         *
 202         * Note that @mem::mm_node will only be dereferenced by
 203         * struct ttm_mem_type_manager functions and optionally by the driver,
 204         * which has knowledge of the underlying type.
 205         *
 206         * This function may not be called from within atomic context, so
 207         * an implementation can and must use either a mutex or a spinlock to
 208         * protect any data structures managing the space.
 209         */
 210        int  (*get_node)(struct ttm_mem_type_manager *man,
 211                         struct ttm_buffer_object *bo,
 212                         const struct ttm_place *place,
 213                         struct ttm_mem_reg *mem);
 214
 215        /**
 216         * struct ttm_mem_type_manager member put_node
 217         *
 218         * @man: Pointer to a memory type manager.
 219         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
 220         *
 221         * This function frees memory type resources previously allocated
 222         * and that are identified by @mem::mm_node and @mem::start. May not
 223         * be called from within atomic context.
 224         */
 225        void (*put_node)(struct ttm_mem_type_manager *man,
 226                         struct ttm_mem_reg *mem);
 227
 228        /**
 229         * struct ttm_mem_type_manager member debug
 230         *
 231         * @man: Pointer to a memory type manager.
  232         * @printer: Pointer to a struct drm_printer to which the state is printed.
 233         *
 234         * This function is called to print out the state of the memory
 235         * type manager to aid debugging of out-of-memory conditions.
 236         * It may not be called from within atomic context.
 237         */
 238        void (*debug)(struct ttm_mem_type_manager *man,
 239                      struct drm_printer *printer);
 240};
 241
 242/**
 243 * struct ttm_mem_type_manager
 244 *
 245 * @has_type: The memory type has been initialized.
 246 * @use_type: The memory type is enabled.
 247 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 248 * managed by this memory type.
 249 * @gpu_offset: If used, the GPU offset of the first managed page of
 250 * fixed memory or the first managed location in an aperture.
 251 * @size: Size of the managed region.
 252 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 253 * as defined in ttm_placement_common.h
 254 * @default_caching: The default caching policy used for a buffer object
 255 * placed in this memory type if the user doesn't provide one.
 256 * @func: structure pointer implementing the range manager. See above
 257 * @priv: Driver private closure for @func.
 258 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 259 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 260 * reserved by the TTM vm system.
 261 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
  262 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
  263 * static information. bdev::driver::io_mem_free is never used.
  264 * @move_lock: lock protecting the move fence.
 265 * @lru: The lru list for this memory type.
 266 * @move: The fence of the last pipelined move operation.
 267 *
 268 * This structure is used to identify and manage memory types for a device.
 269 * It's set up by the ttm_bo_driver::init_mem_type method.
 270 */
 271
 272
 273
 274struct ttm_mem_type_manager {
 275        struct ttm_bo_device *bdev;
 276
 277        /*
 278         * No protection. Constant from start.
 279         */
 280
 281        bool has_type;
 282        bool use_type;
 283        uint32_t flags;
 284        uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
 285        uint64_t size;
 286        uint32_t available_caching;
 287        uint32_t default_caching;
 288        const struct ttm_mem_type_manager_func *func;
 289        void *priv;
 290        struct mutex io_reserve_mutex;
 291        bool use_io_reserve_lru;
 292        bool io_reserve_fastpath;
 293        spinlock_t move_lock;
 294
 295        /*
 296         * Protected by @io_reserve_mutex:
 297         */
 298
 299        struct list_head io_reserve_lru;
 300
 301        /*
 302         * Protected by the global->lru_lock.
 303         */
 304
 305        struct list_head lru[TTM_MAX_BO_PRIORITY];
 306
 307        /*
 308         * Protected by @move_lock.
 309         */
 310        struct dma_fence *move;
 311};
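/*
 * Example (illustrative sketch, not part of the original header): how a
 * driver's init_mem_type() callback (see struct ttm_bo_driver below) might
 * populate a manager. The TTM_PL_* flags come from ttm_placement.h and
 * ttm_bo_manager_func is the generic range manager declared at the end of
 * this file; mydrv_init_mem_type itself is hypothetical.
 *
 *	static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 *				       struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_SYSTEM:
 *			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_MASK_CACHING;
 *			man->default_caching = TTM_PL_FLAG_CACHED;
 *			break;
 *		case TTM_PL_VRAM:
 *			man->func = &ttm_bo_manager_func;
 *			man->flags = TTM_MEMTYPE_FLAG_FIXED |
 *				     TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_FLAG_UNCACHED |
 *						 TTM_PL_FLAG_WC;
 *			man->default_caching = TTM_PL_FLAG_WC;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */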
 312
 313/**
 314 * struct ttm_bo_driver
 315 *
  316 * @ttm_tt_create: Callback to create a struct ttm_tt backing a buffer object.
 317 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 318 * has been evicted.
 319 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 320 * structure.
 321 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 322 * @move: Callback for a driver to hook in accelerated functions to
 323 * move a buffer.
 324 * If set to NULL, a potentially slow memcpy() move is used.
 325 */
 326
 327struct ttm_bo_driver {
 328        /**
 329         * ttm_tt_create
 330         *
  331         * @bdev: Pointer to a struct ttm_bo_device.
  332         * @size: Size of the data needing backing.
 333         * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 334         * @dummy_read_page: See struct ttm_bo_device.
 335         *
 336         * Create a struct ttm_tt to back data with system memory pages.
 337         * No pages are actually allocated.
 338         * Returns:
 339         * NULL: Out of memory.
 340         */
 341        struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
 342                                        unsigned long size,
 343                                        uint32_t page_flags,
 344                                        struct page *dummy_read_page);
 345
 346        /**
 347         * ttm_tt_populate
 348         *
 349         * @ttm: The struct ttm_tt to contain the backing pages.
 350         *
 351         * Allocate all backing pages
 352         * Returns:
 353         * -ENOMEM: Out of memory.
 354         */
 355        int (*ttm_tt_populate)(struct ttm_tt *ttm);
 356
 357        /**
 358         * ttm_tt_unpopulate
 359         *
 360         * @ttm: The struct ttm_tt to contain the backing pages.
 361         *
  362         * Free all backing pages.
 363         */
 364        void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
 365
 366        /**
 367         * struct ttm_bo_driver member invalidate_caches
 368         *
 369         * @bdev: the buffer object device.
 370         * @flags: new placement of the rebound buffer object.
 371         *
  372         * A previously evicted buffer has been rebound in a
 373         * potentially new location. Tell the driver that it might
 374         * consider invalidating read (texture) caches on the next command
 375         * submission as a consequence.
 376         */
 377
 378        int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
 379        int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
 380                             struct ttm_mem_type_manager *man);
 381
 382        /**
 383         * struct ttm_bo_driver member eviction_valuable
 384         *
 385         * @bo: the buffer object to be evicted
 386         * @place: placement we need room for
 387         *
 388         * Check with the driver if it is valuable to evict a BO to make room
 389         * for a certain placement.
 390         */
 391        bool (*eviction_valuable)(struct ttm_buffer_object *bo,
 392                                  const struct ttm_place *place);
 393        /**
 394         * struct ttm_bo_driver member evict_flags:
 395         *
 396         * @bo: the buffer object to be evicted
 397         *
  398         * Return the bo placement flags for a buffer which is not mapped to the
  399         * hardware. These are returned via @placement so that, when the move is
  400         * finished, they end up in bo->mem.placement.
 401         */
 402
 403        void (*evict_flags)(struct ttm_buffer_object *bo,
 404                            struct ttm_placement *placement);
 405
 406        /**
 407         * struct ttm_bo_driver member move:
 408         *
 409         * @bo: the buffer to move
 410         * @evict: whether this motion is evicting the buffer from
 411         * the graphics address space
 412         * @interruptible: Use interruptible sleeps if possible when sleeping.
  413         * @no_wait_gpu: whether this should give up and return -EBUSY
  414         * rather than wait for the GPU
 415         * @new_mem: the new memory region receiving the buffer
 416         *
 417         * Move a buffer between two memory regions.
 418         */
 419        int (*move)(struct ttm_buffer_object *bo, bool evict,
 420                    bool interruptible, bool no_wait_gpu,
 421                    struct ttm_mem_reg *new_mem);
 422
 423        /**
 424         * struct ttm_bo_driver_member verify_access
 425         *
 426         * @bo: Pointer to a buffer object.
 427         * @filp: Pointer to a struct file trying to access the object.
 428         *
 429         * Called from the map / write / read methods to verify that the
 430         * caller is permitted to access the buffer object.
 431         * This member may be set to NULL, which will refuse this kind of
 432         * access for all buffer objects.
 433         * This function should return 0 if access is granted, -EPERM otherwise.
 434         */
 435        int (*verify_access)(struct ttm_buffer_object *bo,
 436                             struct file *filp);
 437
 438        /**
 439         * Hook to notify driver about a driver move so it
 440         * can do tiling things and book-keeping.
 441         *
 442         * @evict: whether this move is evicting the buffer from the graphics
 443         * address space
 444         */
 445        void (*move_notify)(struct ttm_buffer_object *bo,
 446                            bool evict,
 447                            struct ttm_mem_reg *new_mem);
 448        /* notify the driver we are taking a fault on this BO
 449         * and have reserved it */
 450        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
 451
 452        /**
 453         * notify the driver that we're about to swap out this bo
 454         */
 455        void (*swap_notify)(struct ttm_buffer_object *bo);
 456
 457        /**
  458         * Driver callback invoked when mapping io memory (for bo_move_memcpy,
  459         * for instance). TTM will take care to call io_mem_free whenever
  460         * the mapping is no longer in use. io_mem_reserve & io_mem_free
  461         * are balanced.
 462         */
 463        int (*io_mem_reserve)(struct ttm_bo_device *bdev,
 464                              struct ttm_mem_reg *mem);
 465        void (*io_mem_free)(struct ttm_bo_device *bdev,
 466                            struct ttm_mem_reg *mem);
 467
 468        /**
 469         * Return the pfn for a given page_offset inside the BO.
 470         *
 471         * @bo: the BO to look up the pfn for
 472         * @page_offset: the offset to look up
 473         */
 474        unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
 475                                    unsigned long page_offset);
 476
 477        /**
 478         * Read/write memory buffers for ptrace access
 479         *
 480         * @bo: the BO to access
 481         * @offset: the offset from the start of the BO
 482         * @buf: pointer to source/destination buffer
 483         * @len: number of bytes to copy
  484         * @write: whether to read from (0) or write to (non-zero) the BO
 485         *
 486         * If successful, this function should return the number of
 487         * bytes copied, -EIO otherwise. If the number of bytes
 488         * returned is < len, the function may be called again with
 489         * the remainder of the buffer to copy.
 490         */
 491        int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
 492                             void *buf, int len, int write);
 493};
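/*
 * Example (illustrative sketch, not part of the original header): a driver
 * typically provides a single static instance of struct ttm_bo_driver and
 * passes it to ttm_bo_device_init(). All mydrv_* callbacks are hypothetical;
 * as documented above, .move may be left NULL to fall back to the memcpy()
 * move, and .verify_access may be NULL to refuse map/read/write access.
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create     = mydrv_ttm_tt_create,
 *		.ttm_tt_populate   = mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
 *		.init_mem_type     = mydrv_init_mem_type,
 *		.evict_flags       = mydrv_evict_flags,
 *		.move              = mydrv_bo_move,
 *		.verify_access     = mydrv_verify_access,
 *		.io_mem_reserve    = mydrv_io_mem_reserve,
 *		.io_mem_free       = mydrv_io_mem_free,
 *	};
 */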
 494
 495/**
 496 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 497 */
 498
 499struct ttm_bo_global_ref {
 500        struct drm_global_reference ref;
 501        struct ttm_mem_global *mem_glob;
 502};
 503
 504/**
 505 * struct ttm_bo_global - Buffer object driver global data.
 506 *
 507 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 508 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 509 * of unpopulated pages.
 510 * @shrink: A shrink callback object used for buffer object swap.
 511 * @device_list_mutex: Mutex protecting the device list.
 512 * This mutex is held while traversing the device list for pm options.
 513 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 514 * @device_list: List of buffer object devices.
 515 * @swap_lru: Lru list of buffer objects used for swapping.
 516 */
 517
 518struct ttm_bo_global {
 519
 520        /**
 521         * Constant after init.
 522         */
 523
 524        struct kobject kobj;
 525        struct ttm_mem_global *mem_glob;
 526        struct page *dummy_read_page;
 527        struct ttm_mem_shrink shrink;
 528        struct mutex device_list_mutex;
 529        spinlock_t lru_lock;
 530
 531        /**
 532         * Protected by device_list_mutex.
 533         */
 534        struct list_head device_list;
 535
 536        /**
 537         * Protected by the lru_lock.
 538         */
 539        struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
 540
 541        /**
 542         * Internal protection.
 543         */
 544        atomic_t bo_count;
 545};
 546
 547
 548#define TTM_NUM_MEM_TYPES 8
 549
 550/**
 551 * struct ttm_bo_device - Buffer object driver device-specific data.
 552 *
 553 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 554 * @man: An array of mem_type_managers.
 555 * @vma_manager: Address space manager
  556 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 557 * ddestroy lists.
 558 * @dev_mapping: A pointer to the struct address_space representing the
 559 * device address space.
 560 * @wq: Work queue structure for the delayed delete workqueue.
 561 *
 562 */
 563
 564struct ttm_bo_device {
 565
 566        /*
 567         * Constant after bo device init / atomic.
 568         */
 569        struct list_head device_list;
 570        struct ttm_bo_global *glob;
 571        struct ttm_bo_driver *driver;
 572        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 573
 574        /*
 575         * Protected by internal locks.
 576         */
 577        struct drm_vma_offset_manager vma_manager;
 578
 579        /*
 580         * Protected by the global:lru lock.
 581         */
 582        struct list_head ddestroy;
 583
 584        /*
  585         * Protected by load / firstopen / lastclose / unload sync.
 586         */
 587
 588        struct address_space *dev_mapping;
 589
 590        /*
 591         * Internal protection.
 592         */
 593
 594        struct delayed_work wq;
 595
 596        bool need_dma32;
 597};
 598
 599/**
 600 * ttm_flag_masked
 601 *
 602 * @old: Pointer to the result and original value.
 603 * @new: New value of bits.
 604 * @mask: Mask of bits to change.
 605 *
 606 * Convenience function to change a number of bits identified by a mask.
 607 */
 608
 609static inline uint32_t
 610ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
 611{
 612        *old ^= (*old ^ new) & mask;
 613        return *old;
 614}
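/*
 * Example (illustrative, not part of the original header): replace only the
 * caching bits of a placement word while leaving the other bits untouched.
 * The TTM_PL_* flags come from ttm_placement.h.
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
 *
 *	ttm_flag_masked(&placement, TTM_PL_FLAG_CACHED, TTM_PL_MASK_CACHING);
 *
 * Afterwards placement == (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED); bits
 * outside TTM_PL_MASK_CACHING are unchanged.
 */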
 615
 616/**
 617 * ttm_tt_init
 618 *
 619 * @ttm: The struct ttm_tt.
  620 * @bdev: Pointer to a struct ttm_bo_device.
  621 * @size: Size of the data needing backing.
 622 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 623 * @dummy_read_page: See struct ttm_bo_device.
 624 *
  625 * Initialize a struct ttm_tt to back data with system memory pages.
  626 * No pages are actually allocated.
  627 * Returns:
  628 * -ENOMEM: Out of memory.
 629 */
 630extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 631                        unsigned long size, uint32_t page_flags,
 632                        struct page *dummy_read_page);
 633extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 634                           unsigned long size, uint32_t page_flags,
 635                           struct page *dummy_read_page);
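/*
 * Example (illustrative sketch, not part of the original header): a driver's
 * ttm_tt_create callback usually embeds struct ttm_tt in a driver-private
 * structure and initializes it with ttm_tt_init(). struct mydrv_ttm_tt and
 * mydrv_backend_func are hypothetical (see the backend example near the top
 * of this file).
 *
 *	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
 *						  unsigned long size,
 *						  uint32_t page_flags,
 *						  struct page *dummy_read_page)
 *	{
 *		struct mydrv_ttm_tt *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (!tt)
 *			return NULL;
 *
 *		tt->ttm.func = &mydrv_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */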
 636
 637/**
 638 * ttm_tt_fini
 639 *
 640 * @ttm: the ttm_tt structure.
 641 *
 642 * Free memory of ttm_tt structure
 643 */
 644extern void ttm_tt_fini(struct ttm_tt *ttm);
 645extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 646
 647/**
  648 * ttm_tt_bind:
 649 *
 650 * @ttm: The struct ttm_tt containing backing pages.
 651 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 652 *
 653 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 654 */
 655extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 656
 657/**
  658 * ttm_tt_destroy:
 659 *
 660 * @ttm: The struct ttm_tt.
 661 *
 662 * Unbind, unpopulate and destroy common struct ttm_tt.
 663 */
 664extern void ttm_tt_destroy(struct ttm_tt *ttm);
 665
 666/**
  667 * ttm_tt_unbind:
 668 *
 669 * @ttm: The struct ttm_tt.
 670 *
 671 * Unbind a struct ttm_tt.
 672 */
 673extern void ttm_tt_unbind(struct ttm_tt *ttm);
 674
 675/**
 676 * ttm_tt_swapin:
 677 *
 678 * @ttm: The struct ttm_tt.
 679 *
  680 * Swap in a previously swapped-out ttm_tt.
 681 */
 682extern int ttm_tt_swapin(struct ttm_tt *ttm);
 683
 684/**
 685 * ttm_tt_set_placement_caching:
 686 *
  687 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 688 * @placement: Flag indicating the desired caching policy.
 689 *
 690 * This function will change caching policy of any default kernel mappings of
 691 * the pages backing @ttm. If changing from cached to uncached or
 692 * write-combined,
 693 * all CPU caches will first be flushed to make sure the data of the pages
 694 * hit RAM. This function may be very costly as it involves global TLB
 695 * and cache flushes and potential page splitting / combining.
 696 */
 697extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
 698extern int ttm_tt_swapout(struct ttm_tt *ttm,
 699                          struct file *persistent_swap_storage);
 700
 701/**
 702 * ttm_tt_unpopulate - free pages from a ttm
 703 *
 704 * @ttm: Pointer to the ttm_tt structure
 705 *
 706 * Calls the driver method to free all pages from a ttm
 707 */
 708extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
 709
 710/*
 711 * ttm_bo.c
 712 */
 713
 714/**
 715 * ttm_mem_reg_is_pci
 716 *
 717 * @bdev: Pointer to a struct ttm_bo_device.
 718 * @mem: A valid struct ttm_mem_reg.
 719 *
 720 * Returns true if the memory described by @mem is PCI memory,
 721 * false otherwise.
 722 */
 723extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
 724                                   struct ttm_mem_reg *mem);
 725
 726/**
 727 * ttm_bo_mem_space
 728 *
  729 * @bo: Pointer to a struct ttm_buffer_object, the data of which
  730 * we want to allocate space for.
  731 * @placement: Proposed new placement for the buffer object.
  732 * @mem: A struct ttm_mem_reg.
  733 * @interruptible: Sleep interruptibly while waiting.
 734 * @no_wait_gpu: Return immediately if the GPU is busy.
 735 *
 736 * Allocate memory space for the buffer object pointed to by @bo, using
 737 * the placement flags in @mem, potentially evicting other idle buffer objects.
 738 * This function may sleep while waiting for space to become available.
 739 * Returns:
  740 * -EBUSY: No space available (only if @no_wait_gpu was set).
 741 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 742 * fragmentation or concurrent allocators.
 743 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 744 */
 745extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 746                                struct ttm_placement *placement,
 747                                struct ttm_mem_reg *mem,
 748                                bool interruptible,
 749                                bool no_wait_gpu);
 750
 751extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
 752                           struct ttm_mem_reg *mem);
 753extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 754                                  struct ttm_mem_reg *mem);
 755
 756extern void ttm_bo_global_release(struct drm_global_reference *ref);
 757extern int ttm_bo_global_init(struct drm_global_reference *ref);
 758
 759extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
 760
 761/**
 762 * ttm_bo_device_init
 763 *
 764 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 765 * @glob: A pointer to an initialized struct ttm_bo_global.
 766 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 767 * @mapping: The address space to use for this bo.
 768 * @file_page_offset: Offset into the device address space that is available
 769 * for buffer data. This ensures compatibility with other users of the
 770 * address space.
 771 *
 772 * Initializes a struct ttm_bo_device:
 773 * Returns:
 774 * !0: Failure.
 775 */
 776extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
 777                              struct ttm_bo_global *glob,
 778                              struct ttm_bo_driver *driver,
 779                              struct address_space *mapping,
 780                              uint64_t file_page_offset, bool need_dma32);
 781
 782/**
 783 * ttm_bo_unmap_virtual
 784 *
 785 * @bo: tear down the virtual mappings for this BO
 786 */
 787extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 788
 789/**
  790 * ttm_bo_unmap_virtual_locked
 791 *
 792 * @bo: tear down the virtual mappings for this BO
 793 *
 794 * The caller must take ttm_mem_io_lock before calling this function.
 795 */
 796extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
 797
 798extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
 799extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
 800extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
 801                           bool interruptible);
 802extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 803
 804extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
 805extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 806
 807/**
 808 * __ttm_bo_reserve:
 809 *
 810 * @bo: A pointer to a struct ttm_buffer_object.
 811 * @interruptible: Sleep interruptible if waiting.
 812 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 813 * @ticket: ticket used to acquire the ww_mutex.
 814 *
 815 * Will not remove reserved buffers from the lru lists.
 816 * Otherwise identical to ttm_bo_reserve.
 817 *
 818 * Returns:
 819 * -EDEADLK: The reservation may cause a deadlock.
 820 * Release all buffer reservations, wait for @bo to become unreserved and
  821 * try again. (only if a @ticket was provided).
 822 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 823 * a signal. Release all buffer reservations and return to user-space.
  824 * -EBUSY: The function needed to sleep, but @no_wait was true.
  825 * -EALREADY: Bo already reserved using @ticket. This error code will only
  826 * be returned if a @ticket was provided.
 827 */
 828static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
 829                                   bool interruptible, bool no_wait,
 830                                   struct ww_acquire_ctx *ticket)
 831{
 832        int ret = 0;
 833
 834        if (no_wait) {
 835                bool success;
 836                if (WARN_ON(ticket))
 837                        return -EBUSY;
 838
 839                success = ww_mutex_trylock(&bo->resv->lock);
 840                return success ? 0 : -EBUSY;
 841        }
 842
 843        if (interruptible)
 844                ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
 845        else
 846                ret = ww_mutex_lock(&bo->resv->lock, ticket);
 847        if (ret == -EINTR)
 848                return -ERESTARTSYS;
 849        return ret;
 850}
 851
 852/**
 853 * ttm_bo_reserve:
 854 *
 855 * @bo: A pointer to a struct ttm_buffer_object.
 856 * @interruptible: Sleep interruptible if waiting.
 857 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 858 * @ticket: ticket used to acquire the ww_mutex.
 859 *
  860 * Locks a buffer object for validation (or prevents other processes from
  861 * locking it for validation) and removes it from lru lists, while taking
 862 * a number of measures to prevent deadlocks.
 863 *
 864 * Deadlocks may occur when two processes try to reserve multiple buffers in
 865 * different order, either by will or as a result of a buffer being evicted
 866 * to make room for a buffer already reserved. (Buffers are reserved before
 867 * they are evicted). The following algorithm prevents such deadlocks from
 868 * occurring:
 869 * Processes attempting to reserve multiple buffers other than for eviction,
 870 * (typically execbuf), should first obtain a unique 32-bit
 871 * validation sequence number,
  872 * and call this function with a @ticket whose stamp is set to the unique
 873 * sequence number. If upon call of this function, the buffer object is already
 874 * reserved, the validation sequence is checked against the validation
 875 * sequence of the process currently reserving the buffer,
 876 * and if the current validation sequence is greater than that of the process
 877 * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
 878 * waiting for the buffer to become unreserved, after which it retries
 879 * reserving.
  880 * The caller should, when receiving an -EDEADLK error,
 881 * release all its buffer reservations, wait for @bo to become unreserved, and
 882 * then rerun the validation with the same validation sequence. This procedure
 883 * will always guarantee that the process with the lowest validation sequence
 884 * will eventually succeed, preventing both deadlocks and starvation.
 885 *
 886 * Returns:
 887 * -EDEADLK: The reservation may cause a deadlock.
 888 * Release all buffer reservations, wait for @bo to become unreserved and
  889 * try again. (only if a @ticket was provided).
 890 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 891 * a signal. Release all buffer reservations and return to user-space.
  892 * -EBUSY: The function needed to sleep, but @no_wait was true.
  893 * -EALREADY: Bo already reserved using @ticket. This error code will only
  894 * be returned if a @ticket was provided.
 895 */
 896static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 897                                 bool interruptible, bool no_wait,
 898                                 struct ww_acquire_ctx *ticket)
 899{
 900        int ret;
 901
 902        WARN_ON(!kref_read(&bo->kref));
 903
 904        ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
 905        if (likely(ret == 0))
 906                ttm_bo_del_sub_from_lru(bo);
 907
 908        return ret;
 909}
 910
 911/**
 912 * ttm_bo_reserve_slowpath:
 913 * @bo: A pointer to a struct ttm_buffer_object.
 914 * @interruptible: Sleep interruptible if waiting.
  915 * @ticket: Ticket used to acquire the ww_mutex.
  916 *
  917 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 918 * from all our other reservations. Because there are no other reservations
 919 * held by us, this function cannot deadlock any more.
 920 */
 921static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 922                                          bool interruptible,
 923                                          struct ww_acquire_ctx *ticket)
 924{
 925        int ret = 0;
 926
 927        WARN_ON(!kref_read(&bo->kref));
 928
 929        if (interruptible)
 930                ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
 931                                                       ticket);
 932        else
 933                ww_mutex_lock_slow(&bo->resv->lock, ticket);
 934
 935        if (likely(ret == 0))
 936                ttm_bo_del_sub_from_lru(bo);
 937        else if (ret == -EINTR)
 938                ret = -ERESTARTSYS;
 939
 940        return ret;
 941}
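/*
 * Example (illustrative sketch, not part of the original header): reserving
 * two buffer objects for a command submission using a ww_acquire_ctx ticket,
 * with the -EDEADLK backoff handled by ttm_bo_reserve_slowpath(). Error
 * handling is abbreviated; a real implementation retries the whole sequence
 * should the re-reservation after the slowpath fail again.
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *
 *	ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *	if (ret)
 *		goto out;
 *
 *	ret = ttm_bo_reserve(bo_b, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		ttm_bo_unreserve(bo_a);
 *		ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
 *		if (ret == 0)
 *			ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *	}
 *	if (ret)
 *		goto out;
 *
 *	... submit work referencing bo_a and bo_b ...
 *
 *	ttm_bo_unreserve(bo_a);
 *	ttm_bo_unreserve(bo_b);
 * out:
 *	ww_acquire_fini(&ticket);
 */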
 942
 943/**
 944 * __ttm_bo_unreserve
 945 * @bo: A pointer to a struct ttm_buffer_object.
 946 *
 947 * Unreserve a previous reservation of @bo where the buffer object is
 948 * already on lru lists.
 949 */
 950static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
 951{
 952        ww_mutex_unlock(&bo->resv->lock);
 953}
 954
 955/**
 956 * ttm_bo_unreserve
 957 *
 958 * @bo: A pointer to a struct ttm_buffer_object.
 959 *
 960 * Unreserve a previous reservation of @bo.
 961 */
 962static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 963{
 964        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 965                spin_lock(&bo->glob->lru_lock);
 966                ttm_bo_add_to_lru(bo);
 967                spin_unlock(&bo->glob->lru_lock);
 968        }
 969        __ttm_bo_unreserve(bo);
 970}
 971
 972/**
 973 * ttm_bo_unreserve_ticket
 974 * @bo: A pointer to a struct ttm_buffer_object.
 975 * @ticket: ww_acquire_ctx used for reserving
 976 *
 977 * Unreserve a previous reservation of @bo made with @ticket.
 978 */
 979static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
 980                                           struct ww_acquire_ctx *t)
 981{
 982        ttm_bo_unreserve(bo);
 983}
 984
 985/*
 986 * ttm_bo_util.c
 987 */
 988
 989int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 990                       struct ttm_mem_reg *mem);
 991void ttm_mem_io_free(struct ttm_bo_device *bdev,
 992                     struct ttm_mem_reg *mem);
 993/**
 994 * ttm_bo_move_ttm
 995 *
 996 * @bo: A pointer to a struct ttm_buffer_object.
 997 * @interruptible: Sleep interruptible if waiting.
 998 * @no_wait_gpu: Return immediately if the GPU is busy.
 999 * @new_mem: struct ttm_mem_reg indicating where to move.
1000 *
1001 * Optimized move function for a buffer object with both old and
1002 * new placement backed by a TTM. The function will, if successful,
1003 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
1004 * and update the (@bo)->mem placement flags. If unsuccessful, the old
1005 * data remains untouched, and it's up to the caller to free the
1006 * memory space indicated by @new_mem.
1007 * Returns:
1008 * !0: Failure.
1009 */
1010
1011extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
1012                           bool interruptible, bool no_wait_gpu,
1013                           struct ttm_mem_reg *new_mem);
1014
1015/**
1016 * ttm_bo_move_memcpy
1017 *
1018 * @bo: A pointer to a struct ttm_buffer_object.
1019 * @interruptible: Sleep interruptible if waiting.
1020 * @no_wait_gpu: Return immediately if the GPU is busy.
1021 * @new_mem: struct ttm_mem_reg indicating where to move.
1022 *
1023 * Fallback move function for a mappable buffer object in mappable memory.
1024 * The function will, if successful,
1025 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
1026 * and update the (@bo)->mem placement flags. If unsuccessful, the old
1027 * data remains untouched, and it's up to the caller to free the
1028 * memory space indicated by @new_mem.
1029 * Returns:
1030 * !0: Failure.
1031 */
1032
1033extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
1034                              bool interruptible, bool no_wait_gpu,
1035                              struct ttm_mem_reg *new_mem);
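/*
 * Example (illustrative sketch, not part of the original header): a driver's
 * move callback will typically try a hardware copy first and fall back to
 * ttm_bo_move_memcpy() when that is not possible. mydrv_hw_can_copy() and
 * mydrv_hw_copy_move() are hypothetical; the latter is sketched after
 * ttm_bo_move_accel_cleanup() below.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		if (mydrv_hw_can_copy(&bo->mem, new_mem))
 *			return mydrv_hw_copy_move(bo, evict, interruptible,
 *						  no_wait_gpu, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
 *					  new_mem);
 *	}
 */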
1036
1037/**
1038 * ttm_bo_free_old_node
1039 *
1040 * @bo: A pointer to a struct ttm_buffer_object.
1041 *
1042 * Utility function to free an old placement after a successful move.
1043 */
1044extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
1045
1046/**
1047 * ttm_bo_move_accel_cleanup.
1048 *
1049 * @bo: A pointer to a struct ttm_buffer_object.
1050 * @fence: A fence object that signals when moving is complete.
1051 * @evict: This is an evict move. Don't return until the buffer is idle.
1052 * @new_mem: struct ttm_mem_reg indicating where to move.
1053 *
1054 * Accelerated move function to be called when an accelerated move
1055 * has been scheduled. The function will create a new temporary buffer object
1056 * representing the old placement, and put the sync object on both buffer
1057 * objects. After that the newly created buffer object is unref'd to be
1058 * destroyed when the move is complete. This will help pipeline
1059 * buffer moves.
1060 */
1061
1062extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
1063                                     struct dma_fence *fence, bool evict,
1064                                     struct ttm_mem_reg *new_mem);
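/*
 * Example (illustrative sketch, not part of the original header): the
 * hypothetical mydrv_hw_copy_move() from the move example above schedules a
 * copy on the device, obtains a dma_fence for it and hands that fence to
 * ttm_bo_move_accel_cleanup() so the move can be pipelined. mydrv_copy_buffer()
 * is assumed to return the fence of the scheduled copy or an ERR_PTR.
 *
 *	static int mydrv_hw_copy_move(struct ttm_buffer_object *bo, bool evict,
 *				      bool interruptible, bool no_wait_gpu,
 *				      struct ttm_mem_reg *new_mem)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
 *		if (IS_ERR(fence))
 *			return PTR_ERR(fence);
 *
 *		ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */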
1065
1066/**
1067 * ttm_bo_pipeline_move.
1068 *
1069 * @bo: A pointer to a struct ttm_buffer_object.
1070 * @fence: A fence object that signals when moving is complete.
1071 * @evict: This is an evict move. Don't return until the buffer is idle.
1072 * @new_mem: struct ttm_mem_reg indicating where to move.
1073 *
1074 * Function for pipelining accelerated moves. Either free the memory
1075 * immediately or hang it on a temporary buffer object.
1076 */
1077int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
1078                         struct dma_fence *fence, bool evict,
1079                         struct ttm_mem_reg *new_mem);
1080
1081/**
1082 * ttm_io_prot
1083 *
 1084 * @caching_flags: The caching flags (TTM_PL_FLAG_*) of the memory.
1085 * @tmp: Page protection flag for a normal, cached mapping.
1086 *
1087 * Utility function that returns the pgprot_t that should be used for
 1088 * setting up a PTE with the caching model indicated by @caching_flags.
1089 */
1090extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
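/*
 * Example (illustrative, not part of the original header): ttm_io_prot() is
 * typically used when setting up a kernel or user-space mapping of a buffer,
 * e.g. (assuming linux/vmalloc.h for vmap()):
 *
 *	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
 *	void *vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
 */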
1091
1092extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
1093
1094#if IS_ENABLED(CONFIG_AGP)
1095#include <linux/agp_backend.h>
1096
1097/**
1098 * ttm_agp_tt_create
1099 *
1100 * @bdev: Pointer to a struct ttm_bo_device.
1101 * @bridge: The agp bridge this device is sitting on.
 1102 * @size: Size of the data needing backing.
1103 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
1104 * @dummy_read_page: See struct ttm_bo_device.
1105 *
1106 *
1107 * Create a TTM backend that uses the indicated AGP bridge as an aperture
1108 * for TT memory. This function uses the linux agpgart interface to
1109 * bind and unbind memory backing a ttm_tt.
1110 */
1111extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
1112                                        struct agp_bridge_data *bridge,
1113                                        unsigned long size, uint32_t page_flags,
1114                                        struct page *dummy_read_page);
1115int ttm_agp_tt_populate(struct ttm_tt *ttm);
1116void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
1117#endif
1118
1119#endif
1120