linux/drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it is easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
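
/*
 * A minimal usage sketch (illustrative only; the structure and function
 * names below are hypothetical, and a real driver would pick a lock that
 * matches its object lifetime rules):
 *
 *     struct hypothetical_bo {
 *             struct drm_mm_node node;
 *     };
 *
 *     static int hypothetical_bo_pin(struct drm_mm *mm, struct mutex *lock,
 *                                    struct hypothetical_bo *bo, u64 size)
 *     {
 *             int err;
 *
 *             mutex_lock(lock);
 *             err = drm_mm_insert_node(mm, &bo->node, size);
 *             mutex_unlock(lock);
 *
 *             return err;
 *     }
 *
 *     static void hypothetical_bo_unpin(struct mutex *lock,
 *                                       struct hypothetical_bo *bo)
 *     {
 *             mutex_lock(lock);
 *             drm_mm_remove_node(&bo->node);
 *             mutex_unlock(lock);
 *     }
 */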

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
        unsigned long entries[STACKDEPTH];
        unsigned int n;

        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

        /* May be called under spinlock, so avoid sleeping */
        node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
        struct drm_mm_node *node;
        unsigned long *entries;
        unsigned int nr_entries;
        char *buf;

        buf = kmalloc(BUFSZ, GFP_KERNEL);
        if (!buf)
                return;

        list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
                if (!node->stack) {
                        DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
                                  node->start, node->size);
                        continue;
                }

                nr_entries = stack_depot_fetch(node->stack, &entries);
                stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
                DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
                          node->start, node->size, buf);
        }

        kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
                     u64, __subtree_last,
                     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
        return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
                                               start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                          struct drm_mm_node *node)
{
        struct drm_mm *mm = hole_node->mm;
        struct rb_node **link, *rb;
        struct drm_mm_node *parent;
        bool leftmost;

        node->__subtree_last = LAST(node);

        if (drm_mm_node_allocated(hole_node)) {
                rb = &hole_node->rb;
                while (rb) {
                        parent = rb_entry(rb, struct drm_mm_node, rb);
                        if (parent->__subtree_last >= node->__subtree_last)
                                break;

                        parent->__subtree_last = node->__subtree_last;
                        rb = rb_parent(rb);
                }

                rb = &hole_node->rb;
                link = &hole_node->rb.rb_right;
                leftmost = false;
        } else {
                rb = NULL;
                link = &mm->interval_tree.rb_root.rb_node;
                leftmost = true;
        }

        while (*link) {
                rb = *link;
                parent = rb_entry(rb, struct drm_mm_node, rb);
                if (parent->__subtree_last < node->__subtree_last)
                        parent->__subtree_last = node->__subtree_last;
                if (node->start < parent->start) {
                        link = &parent->rb.rb_left;
                } else {
                        link = &parent->rb.rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&node->rb, rb, link);
        rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
                                   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
                             struct drm_mm_node *node)
{
        struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
        u64 x = node->hole_size;
        bool first = true;

        while (*link) {
                rb = *link;
                if (x > rb_to_hole_size(rb)) {
                        link = &rb->rb_left;
                } else {
                        link = &rb->rb_right;
                        first = false;
                }
        }

        rb_link_node(&node->rb_hole_size, rb, link);
        rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
                         struct drm_mm_node, rb_hole_addr,
                         u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
        struct rb_node **link = &root->rb_node, *rb_parent = NULL;
        u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
        struct drm_mm_node *parent;

        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
                if (parent->subtree_max_hole < subtree_max_hole)
                        parent->subtree_max_hole = subtree_max_hole;
                if (start < HOLE_ADDR(parent))
                        link = &parent->rb_hole_addr.rb_left;
                else
                        link = &parent->rb_hole_addr.rb_right;
        }

        rb_link_node(&node->rb_hole_addr, rb_parent, link);
        rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        node->hole_size =
                __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
        node->subtree_max_hole = node->hole_size;
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

        insert_hole_size(&mm->holes_size, node);
        insert_hole_addr(&mm->holes_addr, node);

        list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

        list_del(&node->hole_stack);
        rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
        rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
                           &augment_callbacks);
        node->hole_size = 0;
        node->subtree_max_hole = 0;

        DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
        return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
        return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

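/*
 * The holes_size tree is kept sorted by decreasing hole size (see
 * insert_hole_size()), so a best-fit lookup walks right whenever the current
 * hole is big enough, converging on the smallest hole that still fits.
 */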
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
        struct rb_node *rb = mm->holes_size.rb_root.rb_node;
        struct drm_mm_node *best = NULL;

        do {
                struct drm_mm_node *node =
                        rb_entry(rb, struct drm_mm_node, rb_hole_size);

                if (size <= node->hole_size) {
                        best = node;
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        } while (rb);

        return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
        return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
        struct rb_node *rb = mm->holes_addr.rb_node;
        struct drm_mm_node *node = NULL;

        while (rb) {
                u64 hole_start;

                if (!usable_hole_addr(rb, size))
                        break;

                node = rb_hole_addr_to_node(rb);
                hole_start = __drm_mm_hole_node_start(node);

                if (addr < hole_start)
                        rb = node->rb_hole_addr.rb_left;
                else if (addr > hole_start + node->hole_size)
                        rb = node->rb_hole_addr.rb_right;
                else
                        break;
        }

        return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
           u64 start, u64 end, u64 size,
           enum drm_mm_insert_mode mode)
{
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
                return best_hole(mm, size);

        case DRM_MM_INSERT_LOW:
                return find_hole_addr(mm, start, size);

        case DRM_MM_INSERT_HIGH:
                return find_hole_addr(mm, end, size);

        case DRM_MM_INSERT_EVICT:
                return list_first_entry_or_null(&mm->hole_stack,
                                                struct drm_mm_node,
                                                hole_stack);
        }
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */

#define DECLARE_NEXT_HOLE_ADDR(name, first, last)                       \
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)    \
{                                                                       \
        struct rb_node *parent, *node = &entry->rb_hole_addr;           \
                                                                        \
        if (!entry || RB_EMPTY_NODE(node))                              \
                return NULL;                                            \
                                                                        \
        if (usable_hole_addr(node->first, size)) {                      \
                node = node->first;                                     \
                while (usable_hole_addr(node->last, size))              \
                        node = node->last;                              \
                return rb_hole_addr_to_node(node);                      \
        }                                                               \
                                                                        \
        while ((parent = rb_parent(node)) && node == parent->first)     \
                node = parent;                                          \
                                                                        \
        return rb_hole_addr_to_node(parent);                            \
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
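
/*
 * For reference, DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
 * above expands to the equivalent of:
 *
 *     static struct drm_mm_node *next_hole_high_addr(struct drm_mm_node *entry, u64 size)
 *     {
 *             struct rb_node *parent, *node = &entry->rb_hole_addr;
 *
 *             if (!entry || RB_EMPTY_NODE(node))
 *                     return NULL;
 *
 *             // Descend into the lower-address subtree if it can hold a
 *             // big enough hole, and take its highest-address usable hole.
 *             if (usable_hole_addr(node->rb_left, size)) {
 *                     node = node->rb_left;
 *                     while (usable_hole_addr(node->rb_right, size))
 *                             node = node->rb_right;
 *                     return rb_hole_addr_to_node(node);
 *             }
 *
 *             // Otherwise climb towards the root until we step out of a
 *             // left subtree; that parent is the next lower-address hole.
 *             while ((parent = rb_parent(node)) && node == parent->rb_left)
 *                     node = parent;
 *
 *             return rb_hole_addr_to_node(parent);
 *     }
 */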

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
          struct drm_mm_node *node,
          u64 size,
          enum drm_mm_insert_mode mode)
{
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
                return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

        case DRM_MM_INSERT_LOW:
                return next_hole_low_addr(node, size);

        case DRM_MM_INSERT_HIGH:
                return next_hole_high_addr(node, size);

        case DRM_MM_INSERT_EVICT:
                node = list_next_entry(node, hole_stack);
                return &node->hole_stack == &mm->hole_stack ? NULL : node;
        }
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;
        u64 adj_start, adj_end;
        u64 end;

        end = node->start + node->size;
        if (unlikely(end <= node->start))
                return -ENOSPC;

        /* Find the relevant hole to add our node to */
        hole = find_hole_addr(mm, node->start, 0);
        if (!hole)
                return -ENOSPC;

        adj_start = hole_start = __drm_mm_hole_node_start(hole);
        adj_end = hole_end = hole_start + hole->hole_size;

        if (mm->color_adjust)
                mm->color_adjust(hole, node->color, &adj_start, &adj_end);

        if (adj_start > node->start || adj_end < end)
                return -ENOSPC;

        node->mm = mm;

        __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
        list_add(&node->node_list, &hole->node_list);
        drm_mm_interval_tree_add_node(hole, node);
        node->hole_size = 0;

        rm_hole(hole);
        if (node->start > hole_start)
                add_hole(hole);
        if (end < hole_end)
                add_hole(node);

        save_stack(node);
        return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
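
/*
 * Illustrative sketch of reserving a firmware-initialized scanout buffer at
 * driver load (hypothetical names; fb_base and fb_size would come from the
 * firmware takeover code):
 *
 *     static int hypothetical_reserve_fw_fb(struct drm_mm *mm,
 *                                           struct drm_mm_node *node,
 *                                           u64 fb_base, u64 fb_size)
 *     {
 *             memset(node, 0, sizeof(*node));
 *             node->start = fb_base;
 *             node->size = fb_size;
 *
 *             return drm_mm_reserve_node(mm, node);
 *     }
 */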

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
        return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
                                struct drm_mm_node * const node,
                                u64 size, u64 alignment,
                                unsigned long color,
                                u64 range_start, u64 range_end,
                                enum drm_mm_insert_mode mode)
{
        struct drm_mm_node *hole;
        u64 remainder_mask;
        bool once;

        DRM_MM_BUG_ON(range_start > range_end);

        if (unlikely(size == 0 || range_end - range_start < size))
                return -ENOSPC;

        if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
                return -ENOSPC;

        if (alignment <= 1)
                alignment = 0;

        once = mode & DRM_MM_INSERT_ONCE;
        mode &= ~DRM_MM_INSERT_ONCE;

        remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        for (hole = first_hole(mm, range_start, range_end, size, mode);
             hole;
             hole = once ? NULL : next_hole(mm, hole, size, mode)) {
                u64 hole_start = __drm_mm_hole_node_start(hole);
                u64 hole_end = hole_start + hole->hole_size;
                u64 adj_start, adj_end;
                u64 col_start, col_end;

                if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
                        break;

                if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
                        break;

                col_start = hole_start;
                col_end = hole_end;
                if (mm->color_adjust)
                        mm->color_adjust(hole, color, &col_start, &col_end);

                adj_start = max(col_start, range_start);
                adj_end = min(col_end, range_end);

                if (adj_end <= adj_start || adj_end - adj_start < size)
                        continue;

                if (mode == DRM_MM_INSERT_HIGH)
                        adj_start = adj_end - size;

                if (alignment) {
                        u64 rem;

                        if (likely(remainder_mask))
                                rem = adj_start & remainder_mask;
                        else
                                div64_u64_rem(adj_start, alignment, &rem);
                        if (rem) {
                                adj_start -= rem;
                                if (mode != DRM_MM_INSERT_HIGH)
                                        adj_start += alignment;

                                if (adj_start < max(col_start, range_start) ||
                                    min(col_end, range_end) - adj_start < size)
                                        continue;

                                if (adj_end <= adj_start ||
                                    adj_end - adj_start < size)
                                        continue;
                        }
                }

                node->mm = mm;
                node->size = size;
                node->start = adj_start;
                node->color = color;
                node->hole_size = 0;

                __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
                list_add(&node->node_list, &hole->node_list);
                drm_mm_interval_tree_add_node(hole, node);

                rm_hole(hole);
                if (adj_start > hole_start)
                        add_hole(hole);
                if (adj_start + size < hole_end)
                        add_hole(node);

                save_stack(node);
                return 0;
        }

        return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
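
/*
 * Illustrative sketch: a 64 KiB allocation aligned to 4 KiB, placed top-down
 * in the upper half of a hypothetical 1 GiB address space (all values are
 * assumptions for the example):
 *
 *     static int hypothetical_alloc_high(struct drm_mm *mm,
 *                                        struct drm_mm_node *node)
 *     {
 *             return drm_mm_insert_node_in_range(mm, node, SZ_64K, SZ_4K, 0,
 *                                                SZ_512M, SZ_1G,
 *                                                DRM_MM_INSERT_HIGH);
 *     }
 */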

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
        return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
        DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

        prev_node = list_prev_entry(node, node_list);

        if (drm_mm_hole_follows(node))
                rm_hole(node);

        drm_mm_interval_tree_remove(node, &mm->interval_tree);
        list_del(&node->node_list);

        if (drm_mm_hole_follows(prev_node))
                rm_hole(prev_node);
        add_hole(prev_node);

        clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        struct drm_mm *mm = old->mm;

        DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

        *new = *old;

        __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
        list_replace(&old->node_list, &new->node_list);
        rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

        if (drm_mm_hole_follows(old)) {
                list_replace(&old->hole_stack, &new->hole_stack);
                rb_replace_node_cached(&old->rb_hole_size,
                                       &new->rb_hole_size,
                                       &mm->holes_size);
                rb_replace_node(&old->rb_hole_addr,
                                &new->rb_hole_addr,
                                &mm->holes_addr);
        }

        clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the full eviction loop follows
 * below.
 */
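
/*
 * Illustrative sketch of a full eviction loop (the hypothetical_bo structure,
 * its list links and the unbind helper are assumptions; a real driver also
 * needs its own locking and would handle drm_mm_scan_color_evict()):
 *
 *     static int hypothetical_evict_for(struct drm_mm *mm, struct list_head *lru,
 *                                       u64 size, u64 alignment)
 *     {
 *             struct drm_mm_scan scan;
 *             struct hypothetical_bo *bo, *next;
 *             LIST_HEAD(evict_list);
 *             bool found = false;
 *
 *             drm_mm_scan_init(&scan, mm, size, alignment, 0,
 *                              DRM_MM_INSERT_EVICT);
 *             list_for_each_entry(bo, lru, lru_link) {
 *                     list_add(&bo->evict_link, &evict_list);
 *                     if (drm_mm_scan_add_block(&scan, &bo->node)) {
 *                             found = true;
 *                             break;
 *                     }
 *             }
 *
 *             // list_add() prepends, so walking evict_list head-first
 *             // restores the nodes in exactly the reverse order they were
 *             // added to the scan, as required.
 *             list_for_each_entry_safe(bo, next, &evict_list, evict_link) {
 *                     list_del(&bo->evict_link);
 *                     if (drm_mm_scan_remove_block(&scan, &bo->node))
 *                             hypothetical_unbind(bo); // frees bo->node
 *             }
 *
 *             return found ? 0 : -ENOSPC;
 *     }
 */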

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
                                 struct drm_mm *mm,
                                 u64 size,
                                 u64 alignment,
                                 unsigned long color,
                                 u64 start,
                                 u64 end,
                                 enum drm_mm_insert_mode mode)
{
        DRM_MM_BUG_ON(start >= end);
        DRM_MM_BUG_ON(!size || size > end - start);
        DRM_MM_BUG_ON(mm->scan_active);

        scan->mm = mm;

        if (alignment <= 1)
                alignment = 0;

        scan->color = color;
        scan->alignment = alignment;
        scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        scan->size = size;
        scan->mode = mode;

        DRM_MM_BUG_ON(end <= start);
        scan->range_start = start;
        scan->range_end = end;

        scan->hit_start = U64_MAX;
        scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
                           struct drm_mm_node *node)
{
        struct drm_mm *mm = scan->mm;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;
        u64 col_start, col_end;
        u64 adj_start, adj_end;

        DRM_MM_BUG_ON(node->mm != mm);
        DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
        DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
        __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
        mm->scan_active++;

        /* Remove this block from the node_list so that we enlarge the hole
         * (distance between the end of our previous node and the start of
         * our next), without poisoning the link so that we can restore it
         * later in drm_mm_scan_remove_block().
         */
        hole = list_prev_entry(node, node_list);
        DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
        __list_del_entry(&node->node_list);

        hole_start = __drm_mm_hole_node_start(hole);
        hole_end = __drm_mm_hole_node_end(hole);

        col_start = hole_start;
        col_end = hole_end;
        if (mm->color_adjust)
                mm->color_adjust(hole, scan->color, &col_start, &col_end);

        adj_start = max(col_start, scan->range_start);
        adj_end = min(col_end, scan->range_end);
        if (adj_end <= adj_start || adj_end - adj_start < scan->size)
                return false;

        if (scan->mode == DRM_MM_INSERT_HIGH)
                adj_start = adj_end - scan->size;

        if (scan->alignment) {
                u64 rem;

                if (likely(scan->remainder_mask))
                        rem = adj_start & scan->remainder_mask;
                else
                        div64_u64_rem(adj_start, scan->alignment, &rem);
                if (rem) {
                        adj_start -= rem;
                        if (scan->mode != DRM_MM_INSERT_HIGH)
                                adj_start += scan->alignment;
                        if (adj_start < max(col_start, scan->range_start) ||
                            min(col_end, scan->range_end) - adj_start < scan->size)
                                return false;

                        if (adj_end <= adj_start ||
                            adj_end - adj_start < scan->size)
                                return false;
                }
        }

        scan->hit_start = adj_start;
        scan->hit_end = adj_start + scan->size;

        DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
        DRM_MM_BUG_ON(scan->hit_start < hole_start);
        DRM_MM_BUG_ON(scan->hit_end > hole_end);

        return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() using
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
                              struct drm_mm_node *node)
{
        struct drm_mm_node *prev_node;

        DRM_MM_BUG_ON(node->mm != scan->mm);
        DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
        __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

        DRM_MM_BUG_ON(!node->mm->scan_active);
        node->mm->scan_active--;

        /* During drm_mm_scan_add_block() we decoupled this node leaving
         * its pointers intact. Now that the caller is walking back along
         * the eviction list we can restore this block into its rightful
         * place on the full node_list. To confirm that the caller is walking
         * backwards correctly we check that prev_node->next == node->next,
         * i.e. both believe the same node should be on the other side of the
         * hole.
         */
        prev_node = list_prev_entry(node, node_list);
        DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
                      list_next_entry(node, node_list));
        list_add(&node->node_list, &prev_node->node_list);

        return (node->start + node->size > scan->hit_start &&
                node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
        struct drm_mm *mm = scan->mm;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;

        DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

        if (!mm->color_adjust)
                return NULL;

        /*
         * The hole found during scanning should ideally be the first element
         * in the hole_stack list, but due to side-effects in the driver it
         * may not be.
         */
        list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
                hole_start = __drm_mm_hole_node_start(hole);
                hole_end = hole_start + hole->hole_size;

                if (hole_start <= scan->hit_start &&
                    hole_end >= scan->hit_end)
                        break;
        }

        /* We should only be called after we found the hole previously */
        DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
        if (unlikely(&hole->hole_stack == &mm->hole_stack))
                return NULL;

        DRM_MM_BUG_ON(hole_start > scan->hit_start);
        DRM_MM_BUG_ON(hole_end < scan->hit_end);

        mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
        if (hole_start > scan->hit_start)
                return hole;
        if (hole_end < scan->hit_end)
                return list_next_entry(hole, node_list);

        return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
        DRM_MM_BUG_ON(start + size <= start);

        mm->color_adjust = NULL;

        INIT_LIST_HEAD(&mm->hole_stack);
        mm->interval_tree = RB_ROOT_CACHED;
        mm->holes_size = RB_ROOT_CACHED;
        mm->holes_addr = RB_ROOT;

        /* Clever trick to avoid a special case in the free hole tracking. */
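        /*
         * head_node sits just past the end of the managed range (at
         * start + size) with a size of -size, so the hole computation wraps
         * around: the single hole following head_node covers exactly
         * [start, start + size) while head_node itself occupies no space in
         * the range.
         */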
        INIT_LIST_HEAD(&mm->head_node.node_list);
        mm->head_node.flags = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = -size;
        add_hole(&mm->head_node);

        mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
        if (WARN(!drm_mm_clean(mm),
                 "Memory manager not clean during takedown.\n"))
                show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
        u64 start, size;

        size = entry->hole_size;
        if (size) {
                start = drm_mm_hole_node_start(entry);
                drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
                           start, start + size, size);
        }

        return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
        const struct drm_mm_node *entry;
        u64 total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(p, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
                           entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(p, entry);
        }
        total = total_free + total_used;

        drm_printf(p, "total: %llu, used %llu free %llu\n", total,
                   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
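
/*
 * Illustrative sketch: wiring drm_mm_print() up to a debugfs file via a
 * seq_file printer (the hypothetical_mm_show name and m->private wiring are
 * assumptions; remember drm_mm does no locking of its own):
 *
 *     static int hypothetical_mm_show(struct seq_file *m, void *data)
 *     {
 *             struct drm_mm *mm = m->private;
 *             struct drm_printer p = drm_seq_file_printer(m);
 *
 *             drm_mm_print(mm, &p);
 *
 *             return 0;
 *     }
 */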