linux/drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return -ERESTART;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                        (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
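
/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern the reservation API above expects. A driver reserves a
 * buffer before touching its placement and unreserves it afterwards,
 * which also puts the buffer back on its LRU lists. Error codes
 * follow this file's conventions: -ERESTART on a signal, -EBUSY with
 * no_wait, -EAGAIN on a sequence deadlock. The function name is
 * hypothetical.
 *
 *      static int example_touch_buffer(struct ttm_buffer_object *bo)
 *      {
 *              int ret;
 *
 *              ret = ttm_bo_reserve(bo, true, false, false, 0);
 *              if (unlikely(ret != 0))
 *                      return ret;
 *
 *              // ... validate, map or otherwise use bo here ...
 *
 *              ttm_bo_unreserve(bo);
 *              return 0;
 *      }
 */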
/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through: device ttms are created like kernel ttms. */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {

                        struct ttm_mem_reg *old_mem = &bo->mem;
                        uint32_t save_flags = old_mem->placement;

                        *old_mem = *mem;
                        mem->mm_node = NULL;
                        ttm_flag_masked(&save_flags, mem->placement,
                                        TTM_PL_MASK_MEMTYPE);
                        goto moved;
                }

        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        }

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&glob->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                }
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&glob->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_release_list);

                return 0;
        }

        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;

        spin_lock(&glob->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;

                /*
                 * Protect the next list entry from destruction while we
                 * unlock the lru_lock.
                 */

                if (next != &bdev->ddestroy) {
                        nentry = list_entry(next, struct ttm_buffer_object,
                                            ddestroy);
                        kref_get(&nentry->list_kref);
                }
                kref_get(&entry->list_kref);

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);

                spin_lock(&glob->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
                        spin_unlock(&glob->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                         * list traversal.
                         */

                        if (!next_onlist)
                                break;
                }
                if (ret)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
                        bool interruptible, bool no_wait)
{
        int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        uint32_t proposed_placement;

        if (bo->mem.mem_type != mem_type)
                goto out;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTART) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        proposed_placement = bdev->driver->evict_flags(bo);

        ret = ttm_bo_mem_space(bo, proposed_placement,
                               &evict_mem, interruptible, no_wait);
        if (unlikely(ret != 0 && ret != -ERESTART))
                ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
                                       &evict_mem, interruptible, no_wait);

        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                goto out;
        }

        spin_lock(&glob->lru_lock);
        if (evict_mem.mm_node) {
                drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
        spin_unlock(&glob->lru_lock);
        bo->evicted = true;
out:
        return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem,
                                  uint32_t mem_type,
                                  bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct drm_mm_node *node;
        struct ttm_buffer_object *entry;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct list_head *lru;
        unsigned long num_pages = mem->num_pages;
        int put_count = 0;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&man->manager);
        if (unlikely(ret != 0))
                return ret;

        spin_lock(&glob->lru_lock);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;

                lru = &man->lru;
                if (list_empty(lru))
                        break;

                entry = list_first_entry(lru, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);

                ret = ttm_bo_reserve_locked(entry, interruptible,
                                            no_wait, false, 0);

                if (likely(ret == 0))
                        put_count = ttm_bo_del_from_lru(entry);

                spin_unlock(&glob->lru_lock);

                if (unlikely(ret != 0))
                        return ret;

                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);

                ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

                ttm_bo_unreserve(entry);

                kref_put(&entry->list_kref, ttm_bo_release_list);
                if (ret)
                        return ret;

                spin_lock(&glob->lru_lock);
        } while (1);

        if (!node) {
                spin_unlock(&glob->lru_lock);
                return -ENOMEM;
        }

        node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
                spin_unlock(&glob->lru_lock);
                goto retry_pre_get;
        }

        spin_unlock(&glob->lru_lock);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /*
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}
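
/*
 * Worked example for ttm_bo_select_caching() above, with illustrative
 * flag values:
 *
 *      cur_placement = TTM_PL_FLAG_CACHED;
 *      proposed      = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC;
 *      caching       = proposed & TTM_PL_MASK_CACHING
 *                    = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC;
 *
 * Since (cur_placement & caching) != 0, the current mode is kept and the
 * result is TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED. Only when the current
 * mode is not among the proposed ones do the manager default and the
 * fixed preference order cached > wc > uncached apply.
 */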

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     uint32_t proposed_placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_type_manager *man;

        uint32_t num_prios = bdev->driver->num_mem_type_prio;
        const uint32_t *prios = bdev->driver->mem_type_prio;
        uint32_t i;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_eagain = false;
        struct drm_mm_node *node = NULL;
        int ret;

        mem->mm_node = NULL;
        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
                                               mem_type, proposed_placement,
                                               &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        do {
                                ret = drm_mm_pre_get(&man->manager);
                                if (unlikely(ret))
                                        return ret;

                                spin_lock(&glob->lru_lock);
                                node = drm_mm_search_free(&man->manager,
                                                          mem->num_pages,
                                                          mem->page_alignment,
                                                          1);
                                if (unlikely(!node)) {
                                        spin_unlock(&glob->lru_lock);
                                        break;
                                }
                                node = drm_mm_get_block_atomic(node,
                                                               mem->num_pages,
                                                               mem->page_alignment);
                                spin_unlock(&glob->lru_lock);
                        } while (!node);
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        num_prios = bdev->driver->num_mem_busy_prio;
        prios = bdev->driver->mem_busy_prio;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                if (!man->has_type)
                        continue;

                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
                                          proposed_placement, &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);

                ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                             interruptible, no_wait);

                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }

                if (ret == -ERESTART)
                        has_eagain = true;
        }

        ret = (has_eagain) ? -ERESTART : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
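
/*
 * Illustrative sketch of the ttm_bo_mem_space() contract (compare
 * ttm_bo_move_buffer() below, which is the in-file caller): the buffer
 * must be reserved, the caller fills in size and alignment, and a
 * mem.mm_node returned on success belongs to the caller until it is
 * either handed to ttm_bo_handle_move_mem() or released again.
 * "abort_for_some_reason" is a made-up condition.
 *
 *      struct ttm_mem_reg mem;
 *
 *      mem.num_pages = bo->num_pages;
 *      mem.size = mem.num_pages << PAGE_SHIFT;
 *      mem.page_alignment = bo->mem.page_alignment;
 *      ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *                             &mem, true, false);
 *      if (ret == 0 && mem.mm_node && abort_for_some_reason) {
 *              spin_lock(&bo->glob->lru_lock);
 *              drm_mm_put_block(mem.mm_node);
 *              spin_unlock(&bo->glob->lru_lock);
 *      }
 */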

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        ret = wait_event_interruptible(bo->event_queue,
                                       atomic_read(&bo->cpu_writers) == 0);

        if (ret == -ERESTARTSYS)
                ret = -ERESTART;

        return ret;
}

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                       uint32_t proposed_placement,
                       bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (ret)
                return ret;

        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;

        /*
         * Determine where to move the buffer.
         */

        ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
                               interruptible, no_wait);
        if (ret)
                goto out_unlock;

        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
                             struct ttm_mem_reg *mem)
{
        if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
                return 0;
        if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
                return 0;

        return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
                               uint32_t proposed_placement,
                               bool interruptible, bool no_wait)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        bo->proposed_placement = proposed_placement;

        TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
                  (unsigned long)proposed_placement,
                  (unsigned long)bo->mem.placement);

        /*
         * Check whether we need to move buffer.
         */

        if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
                ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
                                         interruptible, no_wait);
                if (ret) {
                        if (ret != -ERESTART)
                                printk(KERN_ERR TTM_PFX
                                       "Failed moving buffer. "
                                       "Proposed placement 0x%08x\n",
                                       bo->proposed_placement);
                        if (ret == -ENOMEM)
                                printk(KERN_ERR TTM_PFX
                                       "Out of aperture space or "
                                       "DRM memory quota.\n");
                        return ret;
                }
        }

        /*
         * We might need to add a TTM.
         */

        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        /*
         * Validation has succeeded, move the access and other
         * non-mapping-related flag bits from the proposed flags to
         * the active flags.
         */

        ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
                        ~TTM_PL_MASK_MEMTYPE);

        return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
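
/*
 * Illustrative sketch (hypothetical helper): a reserve/validate cycle
 * as a driver's execbuf path might issue it.
 *
 *      static int example_pin_to_tt(struct ttm_buffer_object *bo)
 *      {
 *              int ret;
 *
 *              ret = ttm_bo_reserve(bo, true, false, false, 0);
 *              if (unlikely(ret != 0))
 *                      return ret;
 *              ret = ttm_buffer_object_validate(bo,
 *                                               TTM_PL_FLAG_TT |
 *                                               TTM_PL_FLAG_CACHED,
 *                                               true, false);
 *              ttm_bo_unreserve(bo);
 *              return ret;     // -ERESTART asks the caller to retry
 *      }
 */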

int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
                       uint32_t set_flags, uint32_t clr_flags)
{
        uint32_t new_mask = set_flags | clr_flags;

        if ((bo->type == ttm_bo_type_user) &&
            (clr_flags & TTM_PL_FLAG_CACHED)) {
                printk(KERN_ERR TTM_PFX
                       "User buffers require cache-coherent memory.\n");
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                if (new_mask & TTM_PL_FLAG_NO_EVICT) {
                        printk(KERN_ERR TTM_PFX "Need to be root to modify"
                               " NO_EVICT status.\n");
                        return -EINVAL;
                }

                if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
                    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                        printk(KERN_ERR TTM_PFX
                               "Incompatible memory specification"
                               " for NO_EVICT buffer.\n");
                        return -EINVAL;
                }
        }
        return 0;
}

int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                           struct ttm_buffer_object *bo,
                           unsigned long size,
                           enum ttm_bo_type type,
                           uint32_t flags,
                           uint32_t page_alignment,
                           unsigned long buffer_start,
                           bool interruptible,
                           struct file *persistant_swap_storage,
                           size_t acc_size,
                           void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, flags, 0ULL);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * If no caching attributes are set, accept any form of caching.
         */

        if ((flags & TTM_PL_MASK_CACHING) == 0)
                flags |= TTM_PL_MASK_CACHING;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */

        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                             unsigned long size,
                             enum ttm_bo_type type,
                             uint32_t flags,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             bool interruptible,
                             struct file *persistant_swap_storage,
                             struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        int ret;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

        size_t acc_size =
            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
                                     page_alignment, buffer_start,
                                     interruptible,
                                     persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
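
/*
 * Illustrative sketch: creating a 1 MiB, CPU-cached kernel buffer with
 * the helper above. The size argument is in bytes;
 * ttm_buffer_object_create() derives the page count itself.
 *
 *      struct ttm_buffer_object *bo;
 *      int ret;
 *
 *      ret = ttm_buffer_object_create(bdev, 1024 * 1024,
 *                                     ttm_bo_type_kernel,
 *                                     TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *                                     0, 0, false, NULL, &bo);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ...
 *      ttm_bo_unref(&bo);
 */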

static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
                             uint32_t mem_type, bool allow_errors)
{
        int ret;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (ret && allow_errors)
                goto out;

        if (bo->mem.mem_type == mem_type)
                ret = ttm_bo_evict(bo, mem_type, false, false);

        if (ret) {
                if (allow_errors) {
                        goto out;
                } else {
                        ret = 0;
                        printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
                }
        }

out:
        return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   struct list_head *head,
                                   unsigned mem_type, bool allow_errors)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry;
        int ret;
        int put_count;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);

        while (!list_empty(head)) {
                entry = list_first_entry(head, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);
                ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
                put_count = ttm_bo_del_from_lru(entry);
                spin_unlock(&glob->lru_lock);
                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);
                BUG_ON(ret);
                ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
                ttm_bo_unreserve(entry);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                spin_lock(&glob->lru_lock);
        }

        spin_unlock(&glob->lru_lock);

        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

                spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;

                spin_unlock(&glob->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_offset, unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        if (type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
                return ret;
        }

        man = &bdev->man[type];
        if (man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory manager already initialized for type %d\n",
                       type);
                return ret;
        }

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                if (!p_size) {
                        printk(KERN_ERR TTM_PFX
                               "Zero size memory manager type %d\n",
                               type);
                        return ret;
                }
                ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
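
/*
 * Illustrative sketch: how a driver's load path might add a fixed VRAM
 * manager with the function above. Offset and size are in pages, and
 * TTM_PL_SYSTEM is already set up by ttm_bo_device_init(), so drivers
 * only add their own types. "vram_pages" is a made-up variable.
 *
 *      ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_pages);
 *      if (unlikely(ret != 0))
 *              return ret;
 */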

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        INIT_LIST_HEAD(&glob->swap_lru);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_no_shrink;
        }

        glob->ttm_bo_extra_size =
                ttm_round_pot(sizeof(struct ttm_tt)) +
                ttm_round_pot(sizeof(struct ttm_backend));

        glob->ttm_bo_size = glob->ttm_bo_extra_size +
                ttm_round_pot(sizeof(struct ttm_buffer_object));

        atomic_set(&glob->bo_count, 0);

        kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
        ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
        struct ttm_bo_global *glob = bdev->glob;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                printk(KERN_ERR TTM_PFX
                                       "DRM memory manager type %d "
                                       "is not clean.\n", i);
                        }
                        man->has_type = false;
                }
        }

        mutex_lock(&glob->device_list_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&glob->device_list_mutex);

        if (!cancel_delayed_work(&bdev->wq))
                flush_scheduled_work();

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);

        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        rwlock_init(&bdev->vm_lock);
        bdev->driver = driver;

        memset(bdev->man, 0, sizeof(bdev->man));

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_no_addr_mm;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;

        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&glob->device_list_mutex);

        return 0;
out_no_addr_mm:
        ttm_bo_clean_mm(bdev, 0);
out_no_sys:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
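
/*
 * Illustrative bring-up order (driver structure and names are
 * hypothetical): the bo global must exist before any device is
 * initialized, and memory types other than TTM_PL_SYSTEM are added
 * afterwards with ttm_bo_init_mm().
 *
 *      ret = ttm_bo_device_init(&drv->bdev, glob, &drv_bo_driver,
 *                               DRV_FILE_PAGE_OFFSET, drv->need_dma32);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ret = ttm_bo_init_mm(&drv->bdev, TTM_PL_VRAM, 0, vram_pages);
 */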

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                      struct ttm_mem_reg *mem,
                      unsigned long *bus_base,
                      unsigned long *bus_offset, unsigned long *bus_size)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        *bus_size = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;

        if (ttm_mem_reg_is_pci(bdev, mem)) {
                *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                *bus_size = mem->num_pages << PAGE_SHIFT;
                *bus_base = man->io_offset;
        }

        return 0;
}
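
/*
 * Illustrative sketch: turning the bus triple above into a CPU
 * mapping, roughly as the TTM utility code does for kernel maps.
 * Error handling is elided; a *bus_size of zero after a successful
 * call means the region cannot be mapped this way.
 *
 *      unsigned long bus_base, bus_offset, bus_size;
 *      void __iomem *virt;
 *
 *      ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base,
 *                              &bus_offset, &bus_size);
 *      if (ret == 0 && bus_size != 0)
 *              virt = ioremap_wc(bus_base + bus_offset, bus_size);
 */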

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;

        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/*
 * Insert @bo into the device address space rb-tree, keyed on the start
 * of its drm_mm node. Called with bdev->vm_lock held for writing.
 */
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}
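
/*
 * Sketch of the reverse lookup (the real version lives with the fault
 * handling code, not in this file; names here are illustrative): walk
 * the tree until a node whose range contains the faulting page offset
 * is found.
 *
 *        struct rb_node *cur = bdev->addr_space_rb.rb_node;
 *        struct ttm_buffer_object *bo;
 *
 *        while (cur) {
 *                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
 *                if (page_start < bo->vm_node->start)
 *                        cur = cur->rb_left;
 *                else if (page_start >= bo->vm_node->start +
 *                         bo->vm_node->size)
 *                        cur = cur->rb_right;
 *                else
 *                        return bo;
 *        }
 *        return NULL;
 */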

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

retry_pre_get:
        /*
         * Pre-allocate a free node outside of vm_lock so that the block
         * below can be grabbed atomically, without sleeping under the
         * lock.
         */
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                /*
                 * The pre-allocated node was consumed by someone else;
                 * drop the lock and start over.
                 */
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}
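
/*
 * Illustrative sketch (user-space side; the fd and size variables are
 * hypothetical): addr_space_offset is the offset user space passes to
 * mmap() on the drm device file to map the buffer, typically after
 * fetching it through a driver-specific ioctl:
 *
 *        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   drm_fd, (off_t) addr_space_offset);
 */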

/*
 * Wait for the fence object attached to @bo to signal. Called with
 * bo->lock held; the lock may be dropped and reacquired while waiting.
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;

        if (likely(bo->sync_obj == NULL))
                return 0;

        while (bo->sync_obj) {
                if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bo->lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                        return ret;
                }
                spin_lock(&bo->lock);
                if (likely(bo->sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                  &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                } else {
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
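
/*
 * Illustrative sketch: idling a (reserved) buffer before touching its
 * contents from the CPU; this is the same pattern
 * ttm_bo_synccpu_write_grab() below uses:
 *
 *        spin_lock(&bo->lock);
 *        ret = ttm_bo_wait(bo, false, true, false);
 *        spin_unlock(&bo->lock);
 *        if (unlikely(ret != 0))
 *                return ret;
 */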

/*
 * Release a reservation taken with ttm_bo_block_reservation() and wake
 * up any waiters.
 */
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

/*
 * Reserve @bo without touching the LRU lists. Returns -EBUSY if
 * @no_wait is set and the buffer is already reserved, and -ERESTART if
 * an interruptible wait is interrupted by a signal.
 */
int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
                             bool no_wait)
{
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (no_wait)
                        return -EBUSY;
                else if (interruptible) {
                        ret = wait_event_interruptible
                            (bo->event_queue, atomic_read(&bo->reserved) == 0);
                        if (unlikely(ret != 0))
                                return -ERESTART;
                } else {
                        wait_event(bo->event_queue,
                                   atomic_read(&bo->reserved) == 0);
                }
        }
        return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        /*
         * Using ttm_bo_reserve() instead of ttm_bo_block_reservation()
         * makes sure the LRU lists are updated as well.
         */
        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, true, no_wait);
        spin_unlock(&bo->lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}

/*
 * Drop a CPU write grab taken with ttm_bo_synccpu_write_grab() and wake
 * up anyone waiting for the buffer to become idle.
 */
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}
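
/*
 * Illustrative sketch (the mapping step in the middle is hypothetical):
 * bracketing direct CPU writes to the buffer contents:
 *
 *        ret = ttm_bo_synccpu_write_grab(bo, false);
 *        if (unlikely(ret != 0))
 *                return ret;
 *        ... kmap the pages and write to them ...
 *        ttm_bo_synccpu_write_release(bo);
 */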

/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the ttm_bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_global *glob =
            container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&glob->swap_lru))) {
                        spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                /*
                 * Reserve buffer. Since we unlock while sleeping, we
                 * need to re-check that nobody removed us from the
                 * swap-list while we slept.
                 */
                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        /*
         * Wait for GPU, then move to system cached.
         */
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */
        ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:
        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}
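
/*
 * Illustrative sketch: how the callback above is wired into the memory
 * accounting code (this happens at global init time, outside this
 * section; variable names are illustrative):
 *
 *        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
 *        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
 */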

/*
 * Repeatedly swap out buffer objects until ttm_bo_swapout() reports
 * that the swap LRU is empty or an error occurs.
 */
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
}