linux/include/drm/ttm/ttm_bo_driver.h
/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
        /**
         * struct ttm_backend_func member populate
         *
         * @backend: Pointer to a struct ttm_backend.
         * @num_pages: Number of pages to populate.
         * @pages: Array of pointers to ttm pages.
         * @dummy_read_page: Page to be used instead of NULL pages in the
         * array @pages.
         *
         * Populate the backend with ttm pages. Depending on the backend,
         * it may or may not copy the @pages array.
         */
        int (*populate) (struct ttm_backend *backend,
                         unsigned long num_pages, struct page **pages,
                         struct page *dummy_read_page);
        /**
         * struct ttm_backend_func member clear
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * This is an "unpopulate" function. Release all resources
         * allocated with populate.
         */
        void (*clear) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member bind
         *
         * @backend: Pointer to a struct ttm_backend.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture- and system page sizes.
         */
        int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

        /**
         * struct ttm_backend_func member unbind
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture- and system page sizes.
         */
        int (*unbind) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member destroy
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Destroy the backend.
         */
        void (*destroy) (struct ttm_backend *backend);
};

/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
        struct ttm_bo_device *bdev;
        uint32_t flags;
        struct ttm_backend_func *func;
};
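
/*
 * Example: a minimal sketch of a driver-specific backend built around
 * the function table above. All foo_* names are hypothetical; a real
 * driver typically embeds struct ttm_backend in its own structure:
 *
 *      struct foo_backend {
 *              struct ttm_backend backend;
 *              struct page **pages;            // cached from populate()
 *              unsigned long num_pages;
 *      };
 *
 *      static int foo_populate(struct ttm_backend *backend,
 *                              unsigned long num_pages, struct page **pages,
 *                              struct page *dummy_read_page)
 *      {
 *              struct foo_backend *fbe =
 *                      container_of(backend, struct foo_backend, backend);
 *
 *              fbe->pages = pages;     // this backend just keeps a reference
 *              fbe->num_pages = num_pages;
 *              return 0;
 *      }
 *
 *      static struct ttm_backend_func foo_backend_func = {
 *              .populate = foo_populate,
 *              .clear = foo_clear,
 *              .bind = foo_bind,
 *              .unbind = foo_unbind,
 *              .destroy = foo_destroy,
 *      };
 */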

#define TTM_PAGE_FLAG_VMALLOC         (1 << 0)
#define TTM_PAGE_FLAG_USER            (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)

enum ttm_caching_state {
        tt_uncached,
        tt_wc,
        tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @page_flags: Page flags as identified by the TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @glob: Pointer to the struct ttm_bo_global.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
        struct page *dummy_read_page;
        struct page **pages;
        long first_himem_page;
        long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
        struct ttm_bo_global *glob;
        struct ttm_backend *be;
        struct task_struct *tsk;
        unsigned long start;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
                tt_bound,
                tt_unbound,
                tt_unpopulated,
        } state;
};

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
                                                   before kernel access. */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3) /* Can't map aperture */

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @io_offset: The io_offset of the first managed page of IO memory or
 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
 * memory, this should be set to 0.
 * @io_size: The size of a managed IO region (fixed memory or aperture).
 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
 * @io_addr should be set to NULL.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
        unsigned long io_offset;
        unsigned long io_size;
        void *io_addr;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;

        /*
         * Protected by the bdev->lru_lock.
         * TODO: Consider one lru_lock per ttm_mem_type_manager.
         * Plays ill with list removal, though.
         */

        struct drm_mm manager;
        struct list_head lru;
};
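
/*
 * Example: an illustrative init_mem_type sketch for a hypothetical driver
 * exposing system memory and a fixed VRAM aperture. The FOO_VRAM_*
 * constants are assumptions; the TTM_PL_* flags come from
 * ttm_placement_common.h.
 *
 *      static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 *                                   struct ttm_mem_type_manager *man)
 *      {
 *              switch (type) {
 *              case TTM_PL_SYSTEM:
 *                      man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 *                      man->available_caching = TTM_PL_MASK_CACHING;
 *                      man->default_caching = TTM_PL_FLAG_CACHED;
 *                      break;
 *              case TTM_PL_VRAM:
 *                      man->flags = TTM_MEMTYPE_FLAG_FIXED |
 *                                   TTM_MEMTYPE_FLAG_MAPPABLE |
 *                                   TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
 *                      man->io_offset = FOO_VRAM_BASE;
 *                      man->io_size = FOO_VRAM_SIZE;
 *                      man->available_caching = TTM_PL_FLAG_UNCACHED |
 *                                               TTM_PL_FLAG_WC;
 *                      man->default_caching = TTM_PL_FLAG_WC;
 *                      break;
 *              default:
 *                      return -EINVAL;
 *              }
 *              return 0;
 *      }
 */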

/**
 * struct ttm_bo_driver
 *
 * @mem_type_prio: Priority array of memory types to place a buffer object in
 * if it fits without evicting buffers from any of these memory types.
 * @mem_busy_prio: Priority array of memory types to place a buffer object in
 * if it needs to evict buffers to make room.
 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
 * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
        const uint32_t *mem_type_prio;
        const uint32_t *mem_busy_prio;
        uint32_t num_mem_type_prio;
        uint32_t num_mem_busy_prio;

        /**
         * struct ttm_bo_driver member create_ttm_backend_entry
         *
         * @bdev: The buffer object device.
         *
         * Create a driver specific struct ttm_backend.
         */

        struct ttm_backend *(*create_ttm_backend_entry)
         (struct ttm_bo_device *bdev);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
                              struct ttm_mem_type_manager *man);
        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        uint32_t (*evict_flags) (struct ttm_buffer_object *bo);
        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @interruptible: Use interruptible sleeps if possible when sleeping.
         * @no_wait: whether this should give up and return -EBUSY
         * if this move would require sleeping
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
                     bool no_wait, struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access) (struct ttm_buffer_object *bo,
                              struct file *filp);

        /**
         * In case a driver writer dislikes the TTM fence objects,
         * the driver writer can replace those with sync objects of
         * his / her own. If it turns out that no driver writer is
         * using these, I suggest we remove these hooks and plug in
         * fences directly. The bo driver needs the following functionality:
         * See the corresponding functions in the fence object API
         * documentation.
         */

        bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
        int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
                              bool lazy, bool interruptible);
        int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
        void (*sync_obj_unref) (void **sync_obj);
        void *(*sync_obj_ref) (void *sync_obj);

        /* hook to notify driver about a driver move so it
         * can do tiling things */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
        void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
};
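
/*
 * Example: an illustrative, partial driver instance wiring up the
 * callbacks above. All foo_* symbols are hypothetical placeholders for
 * a real driver's implementations.
 *
 *      static uint32_t foo_prios[] = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
 *
 *      static struct ttm_bo_driver foo_bo_driver = {
 *              .mem_type_prio = foo_prios,
 *              .mem_busy_prio = foo_prios,
 *              .num_mem_type_prio = ARRAY_SIZE(foo_prios),
 *              .num_mem_busy_prio = ARRAY_SIZE(foo_prios),
 *              .create_ttm_backend_entry = foo_create_ttm_backend_entry,
 *              .init_mem_type = foo_init_mem_type,
 *              .evict_flags = foo_evict_flags,
 *              .move = foo_bo_move,
 *              .verify_access = foo_verify_access,
 *              .sync_obj_signaled = foo_fence_signaled,
 *              .sync_obj_wait = foo_fence_wait,
 *              .sync_obj_flush = foo_fence_flush,
 *              .sync_obj_unref = foo_fence_unref,
 *              .sync_obj_ref = foo_fence_ref,
 *      };
 */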

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
        struct ttm_global_reference ref;
        struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

        /**
         * Constant after init.
         */

        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
        size_t ttm_bo_extra_size;
        size_t ttm_bo_size;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;

        /**
         * Protected by device_list_mutex.
         */
        struct list_head device_list;

        /**
         * Protected by the lru_lock.
         */
        struct list_head swap_lru;

        /**
         * Internal protection.
         */
        atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0      /* Buffer object is moving and needs
                                           idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct set up by the driver.
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        /*
         * Protected by the vm lock.
         */
        struct rb_root addr_space_rb;
        struct drm_mm addr_space_mm;

        /*
         * Protected by the global:lru lock.
         */
        struct list_head ddestroy;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        bool nice_mode;
        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;

        bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}
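
/*
 * The xor expression above is equivalent to
 * (*old & ~mask) | (new & mask): bits inside @mask take their value from
 * @new, while bits outside @mask are preserved. An illustrative use,
 * assuming the TTM_PL_* flags from ttm_placement_common.h:
 *
 *      uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *
 *      // Switch the caching bits to write-combined; the placement
 *      // bits are left untouched.
 *      ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 */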

/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needing backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size,
                                    uint32_t page_flags,
                                    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
                           struct task_struct *tsk,
                           unsigned long start, unsigned long num_pages);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
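
/*
 * Example: an illustrative sketch of the ttm_tt lifecycle using
 * ttm_tt_create() and ttm_tt_bind(); @bdev, @glob, @size and @mem are
 * assumed to have been set up by the caller.
 *
 *      struct ttm_tt *ttm;
 *      int ret;
 *
 *      ttm = ttm_tt_create(bdev, size, TTM_PAGE_FLAG_ZERO_ALLOC,
 *                          glob->dummy_read_page);
 *      if (unlikely(ttm == NULL))
 *              return -ENOMEM;
 *
 *      ret = ttm_tt_bind(ttm, &mem);   // allocates pages and binds them
 *      if (unlikely(ret != 0)) {
 *              ttm_tt_destroy(ttm);
 *              return ret;
 *      }
 */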

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_get_page:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to the struct pages to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistant_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
                               struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when waiting for space.
 * @no_wait: Don't sleep waiting for space to become available.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @proposed_placement, potentially evicting other
 * idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTART: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                            uint32_t proposed_placement,
                            struct ttm_mem_reg *mem,
                            bool interruptible, bool no_wait);
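
/*
 * Example: an illustrative call asking for write-combined VRAM; @bo and
 * @mem are assumed to be set up by the caller, and a -ERESTART return is
 * propagated so the pending signal can be delivered first.
 *
 *      ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *                             &mem, true, false);
 *      if (unlikely(ret != 0))
 *              return ret;     // -EBUSY, -ENOMEM or -ERESTART
 */
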
/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
 * -ERESTART: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                             struct ttm_mem_reg *mem,
                             unsigned long *bus_base,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);

extern void ttm_bo_global_release(struct ttm_global_reference *ref);
extern int ttm_bo_global_init(struct ttm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 * @need_dma32: Allocate TT memory pages below the 32-bit (DMA32) boundary.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
                              uint64_t file_page_offset, bool need_dma32);
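
/*
 * Example: an illustrative bring-up sketch. foo_priv, foo_bo_driver and
 * FOO_FILE_PAGE_OFFSET are hypothetical driver-side names.
 *
 *      ret = ttm_bo_device_init(&foo_priv->bdev, foo_priv->bo_global,
 *                               &foo_bo_driver, FOO_FILE_PAGE_OFFSET,
 *                               foo_priv->need_dma32);
 *      if (unlikely(ret != 0))
 *              return ret;
 */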

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved buffer
 * from being evicted. However the lru spinlock is released between the time
 * a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);
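
/*
 * Example: an illustrative execbuf-style reservation loop for one buffer,
 * following the algorithm described above; @val_seq is the caller's
 * (hypothetical) per-submission validation sequence number.
 *
 *      int ret;
 *
 *      do {
 *              ret = ttm_bo_reserve(bo, true, false, true, val_seq);
 *              if (likely(ret != -EAGAIN))
 *                      break;
 *
 *              // Deadlock avoidance: release all reservations already
 *              // held (not shown), wait for this buffer and retry with
 *              // the same sequence number.
 *              ret = ttm_bo_wait_unreserved(bo, true);
 *      } while (ret == 0);
 *
 *      if (unlikely(ret != 0))
 *              return ret;     // e.g. -ERESTART: return to user-space
 */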

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage when
 * backing off from a potential deadlock condition.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
                                  bool interruptible);

/**
 * ttm_bo_block_reservation
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Use interruptible sleep when waiting.
 * @no_wait: Don't sleep, but rather return -EBUSY.
 *
 * Block reservation for validation by simply reserving the buffer.
 * This is intended for single buffer use only without eviction,
 * and thus needs no deadlock protection.
 *
 * Returns:
 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
 * -ERESTART: If interruptible == 1 and the process received a signal
 * while sleeping.
 */
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
                                    bool interruptible, bool no_wait);

/**
 * ttm_bo_unblock_reservation
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unblocks reservation leaving lru lists untouched.
 */
extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait: Never sleep, but rather return with -EBUSY.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                           bool evict, bool no_wait,
                           struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait: Never sleep, but rather return with -EBUSY.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                              bool evict,
                              bool no_wait, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait: Never sleep, but rather return with -EBUSY.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
                                     void *sync_obj_arg,
                                     bool evict, bool no_wait,
                                     struct ttm_mem_reg *new_mem);
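
/*
 * Example: an illustrative driver move callback that tries an accelerated
 * blit and falls back to the memcpy path; foo_can_blit() and
 * foo_move_blit() are hypothetical.
 *
 *      static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                             bool interruptible, bool no_wait,
 *                             struct ttm_mem_reg *new_mem)
 *      {
 *              if (foo_can_blit(bo, new_mem))
 *                      return foo_move_blit(bo, evict, no_wait, new_mem);
 *
 *              // Slow but always-correct fallback.
 *              return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *      }
 */
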
/**
 * ttm_io_prot
 *
 * @c_state: Caching state.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @c_state.
 */
extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_backend_init
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
                                                struct agp_bridge_data *bridge);
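
/*
 * Example: an illustrative create_ttm_backend_entry for an AGP-based
 * driver; foo_device() is a hypothetical accessor for the driver's
 * structure holding the agp bridge.
 *
 *      static struct ttm_backend *
 *      foo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
 *      {
 *              return ttm_agp_backend_init(bdev,
 *                                          foo_device(bdev)->agp_bridge);
 *      }
 */
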
#endif

#endif