linux/drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently an
 * unordered stack of free regions, so there may be substantial performance
 * gains from a smarter data structure, e.g. an RB-tree, at least under heavy
 * fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm:	memory manager we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
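
/*
 * Example (illustrative sketch, not from the original file): a caller that
 * must carve out space while holding a spinlock can top up the node pool
 * with drm_mm_pre_get() beforehand and then use the atomic getters. The
 * dev_priv structure and its mm_lock are hypothetical driver state.
 *
 *	ret = drm_mm_pre_get(&dev_priv->mm);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free_generic(&dev_priv->mm, size, 0, 0, 0);
 *	node = hole ? drm_mm_get_block_generic(hole, size, 0, 0, 1) : NULL;
 *	spin_unlock(&dev_priv->mm_lock);
 */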

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}
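
/*
 * Added note (not in the original source): holes are never stored as
 * separate objects. A node with hole_follows set implicitly owns the gap
 * between its own end and the start of the next node on node_list:
 *
 *	|<---- node ---->|<------- hole ------->|<-- next node -->|
 *	start            start + size           next->start
 */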

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
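
/*
 * Added note (not in the original source): the alignment step above rounds
 * adj_start up to the next multiple of `alignment`. For example, with
 * adj_start = 0x1003 and alignment = 0x1000, tmp = 3 and adj_start becomes
 * 0x1003 + (0x1000 - 3) = 0x2000, the next aligned address.
 */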

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
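
/*
 * Example (illustrative sketch, not from the original file): the preferred
 * pattern is to embed a cleared struct drm_mm_node in a driver object and
 * insert it directly, bypassing the pre-allocated node pool. The my_buffer
 * type is hypothetical.
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be zeroed before insert
 *		unsigned long size;
 *	};
 *
 *	ret = drm_mm_insert_node(&mm, &buf->node, buf->size, PAGE_SIZE);
 *	if (ret == -ENOSPC)
 *		// evict something and retry, or fail the allocation
 */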

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
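
/*
 * Example (illustrative sketch, not from the original file): range-restricted
 * insertion is typically used to keep an object inside an aperture window,
 * e.g. a mappable region at the start of a GTT. The bounds below are made up.
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &buf->node, buf->size,
 *					  PAGE_SIZE, 0, 256 * 1024 * 1024);
 *	if (ret)
 *		return ret;
 */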

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
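
/*
 * Example (illustrative sketch, not from the original file): tearing down an
 * embedded node is a plain remove; the surrounding object owns the memory.
 *
 *	drm_mm_remove_node(&buf->node);
 *	kfree(buf);
 */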

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
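
/*
 * Added note with example (not in the original source): with best_match == 0
 * the search returns the first hole that fits, which is cheap but tends to
 * fragment; with best_match == 1 the whole hole list is scanned for the
 * smallest fitting hole.
 *
 *	// first fit:
 *	hole = drm_mm_search_free_generic(mm, size, alignment, color, 0);
 *	// best fit:
 *	hole = drm_mm_search_free_generic(mm, size, alignment, color, 1);
 */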

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
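
/*
 * Example (illustrative sketch, not from the original file): handing a range
 * over from one embedded node to another, e.g. when migrating bookkeeping
 * into a new driver object without disturbing the address space layout.
 *
 *	drm_mm_replace_node(&old_buf->node, &new_buf->node);
 *	// old_buf->node is now unallocated and may be reused or freed
 */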

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
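
/*
 * Example (illustrative sketch, not from the original file): the first half
 * of a typical eviction pass. Objects are walked in LRU order and added to
 * the scan until a large enough hole is found. The obj type, its lists and
 * scan_link are hypothetical driver state; the unwind half is sketched after
 * drm_mm_scan_remove_block() below.
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	INIT_LIST_HEAD(&roster);
 *	list_for_each_entry(obj, &lru_list, lru) {
 *		list_add(&obj->scan_link, &roster);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;		// hole found
 *	}
 */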

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a suitable hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in exactly the reverse order from the scan list as
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
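
/*
 * Example (illustrative sketch, continuing the eviction sketch above):
 * unwinding the scan. Because the roster was built with list_add(), a
 * forward walk visits nodes in reverse order of addition, as required.
 * Nodes that report non-zero overlap the found hole and must be evicted.
 *
 *	list_for_each_entry_safe(obj, tmp, &roster, scan_link) {
 *		bool evict = drm_mm_scan_remove_block(&obj->node);
 *		list_del(&obj->scan_link);
 *		if (evict) {
 *			// unbind obj, then:
 *			drm_mm_remove_node(&obj->node);
 *		}
 *	}
 */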

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
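
/*
 * Example (illustrative sketch, not from the original file): managing a
 * 256 MiB range starting at offset 0. drm_mm_init() cannot fail in this
 * version, but checking the return value keeps callers future-proof.
 *
 *	struct drm_mm mm;
 *
 *	ret = drm_mm_init(&mm, 0, 256 * 1024 * 1024);
 *	...
 *	if (drm_mm_clean(&mm))
 *		drm_mm_takedown(&mm);
 */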

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
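
/*
 * Example (illustrative sketch, not from the original file): drivers usually
 * expose this through a debugfs "show" callback built on seq_file. The
 * my_dev_mm_info() name and the way the mm pointer is fetched are made up.
 *
 *	static int my_dev_mm_info(struct seq_file *m, void *data)
 *	{
 *		struct drm_mm *mm = m->private;
 *
 *		return drm_mm_dump_table(m, mm);
 *	}
 */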
#endif