linux/drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * The handling of aligned allocations could also be improved.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the Linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core, which means that it's easier to extend for
 * some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep performance cliff anyway, it is not a real concern. Removing a
 * node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
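
/*
 * Example (editor's sketch, not part of the original file): minimal usage of
 * the allocator. The driver struct and its mutex are made-up names standing
 * in for the external locking drm_mm requires; drm_mm_insert_node() is the
 * simple wrapper from drm_mm.h.
 *
 *	struct my_gtt {
 *		struct drm_mm mm;
 *		struct mutex lock;	// drm_mm has no internal locking
 *	};
 *
 *	static int my_gtt_alloc(struct my_gtt *gtt,
 *				struct drm_mm_node *node, u64 size)
 *	{
 *		int ret;
 *
 *		// node must be cleared to 0 before its first insertion
 *		mutex_lock(&gtt->lock);
 *		ret = drm_mm_insert_node(&gtt->mm, node, size);
 *		mutex_unlock(&gtt->lock);
 *		return ret;	// -ENOSPC if no hole was big enough
 *	}
 */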

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
        unsigned long entries[STACKDEPTH];
        unsigned int n;

        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

        /* May be called under spinlock, so avoid sleeping */
        node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
        struct drm_mm_node *node;
        unsigned long *entries;
        unsigned int nr_entries;
        char *buf;

        buf = kmalloc(BUFSZ, GFP_KERNEL);
        if (!buf)
                return;

        list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
                if (!node->stack) {
                        DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
                                  node->start, node->size);
                        continue;
                }

                nr_entries = stack_depot_fetch(node->stack, &entries);
                stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
                DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
                          node->start, node->size, buf);
        }

        kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
                     u64, __subtree_last,
                     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
        return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
                                               start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
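
/*
 * Example (editor's sketch): __drm_mm_interval_first() is normally used
 * through the drm_mm_for_each_node_in_range() helper from drm_mm.h, e.g. to
 * walk all nodes overlapping the (made-up) range [0, 4096):
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, 0, 4096)
 *		pr_info("node [%08llx + %08llx]\n", node->start, node->size);
 */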

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                          struct drm_mm_node *node)
{
        struct drm_mm *mm = hole_node->mm;
        struct rb_node **link, *rb;
        struct drm_mm_node *parent;
        bool leftmost;

        node->__subtree_last = LAST(node);

        if (hole_node->allocated) {
                rb = &hole_node->rb;
                while (rb) {
                        parent = rb_entry(rb, struct drm_mm_node, rb);
                        if (parent->__subtree_last >= node->__subtree_last)
                                break;

                        parent->__subtree_last = node->__subtree_last;
                        rb = rb_parent(rb);
                }

                rb = &hole_node->rb;
                link = &hole_node->rb.rb_right;
                leftmost = false;
        } else {
                rb = NULL;
                link = &mm->interval_tree.rb_root.rb_node;
                leftmost = true;
        }

        while (*link) {
                rb = *link;
                parent = rb_entry(rb, struct drm_mm_node, rb);
                if (parent->__subtree_last < node->__subtree_last)
                        parent->__subtree_last = node->__subtree_last;
                if (node->start < parent->start) {
                        link = &parent->rb.rb_left;
                } else {
                        link = &parent->rb.rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&node->rb, rb, link);
        rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
                                   &drm_mm_interval_tree_augment);
}

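/*
 * Generic insertion helper: link @node into the rbtree @root, sorted by the
 * key @expr computes from the node's embedded rb_node @member.
 */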
#define RB_INSERT(root, member, expr) do { \
        struct rb_node **link = &root.rb_node, *rb = NULL; \
        u64 x = expr(node); \
        while (*link) { \
                rb = *link; \
                if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
                        link = &rb->rb_left; \
                else \
                        link = &rb->rb_right; \
        } \
        rb_link_node(&node->member, rb, link); \
        rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
                             struct drm_mm_node *node)
{
        struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
        u64 x = node->hole_size;
        bool first = true;

        while (*link) {
                rb = *link;
                if (x > rb_to_hole_size(rb)) {
                        link = &rb->rb_left;
                } else {
                        link = &rb->rb_right;
                        first = false;
                }
        }

        rb_link_node(&node->rb_hole_size, rb, link);
        rb_insert_color_cached(&node->rb_hole_size, root, first);
}

static void add_hole(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        node->hole_size =
                __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

        insert_hole_size(&mm->holes_size, node);
        RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

        list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

        list_del(&node->hole_stack);
        rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
        rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
        node->hole_size = 0;

        DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
        return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
        return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
        struct rb_node *rb = mm->holes_size.rb_root.rb_node;
        struct drm_mm_node *best = NULL;

        do {
                struct drm_mm_node *node =
                        rb_entry(rb, struct drm_mm_node, rb_hole_size);

                if (size <= node->hole_size) {
                        best = node;
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        } while (rb);

        return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
        struct rb_node *rb = mm->holes_addr.rb_node;
        struct drm_mm_node *node = NULL;

        while (rb) {
                u64 hole_start;

                node = rb_hole_addr_to_node(rb);
                hole_start = __drm_mm_hole_node_start(node);

                if (addr < hole_start)
                        rb = node->rb_hole_addr.rb_left;
                else if (addr > hole_start + node->hole_size)
                        rb = node->rb_hole_addr.rb_right;
                else
                        break;
        }

        return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
           u64 start, u64 end, u64 size,
           enum drm_mm_insert_mode mode)
{
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
                return best_hole(mm, size);

        case DRM_MM_INSERT_LOW:
                return find_hole(mm, start);

        case DRM_MM_INSERT_HIGH:
                return find_hole(mm, end);

        case DRM_MM_INSERT_EVICT:
                return list_first_entry_or_null(&mm->hole_stack,
                                                struct drm_mm_node,
                                                hole_stack);
        }
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
          struct drm_mm_node *node,
          enum drm_mm_insert_mode mode)
{
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
                return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

        case DRM_MM_INSERT_LOW:
                return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

        case DRM_MM_INSERT_HIGH:
                return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

        case DRM_MM_INSERT_EVICT:
                node = list_next_entry(node, hole_stack);
                return &node->hole_stack == &mm->hole_stack ? NULL : node;
        }
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        u64 end = node->start + node->size;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;
        u64 adj_start, adj_end;

        if (unlikely(end <= node->start))
                return -ENOSPC;

        /* Find the relevant hole to add our node to */
        hole = find_hole(mm, node->start);
        if (!hole)
                return -ENOSPC;

        adj_start = hole_start = __drm_mm_hole_node_start(hole);
        adj_end = hole_end = hole_start + hole->hole_size;

        if (mm->color_adjust)
                mm->color_adjust(hole, node->color, &adj_start, &adj_end);

        if (adj_start > node->start || adj_end < end)
                return -ENOSPC;

        node->mm = mm;

        list_add(&node->node_list, &hole->node_list);
        drm_mm_interval_tree_add_node(hole, node);
        node->allocated = true;
        node->hole_size = 0;

        rm_hole(hole);
        if (node->start > hole_start)
                add_hole(hole);
        if (end < hole_end)
                add_hole(node);

        save_stack(node);
        return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
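
/*
 * Example (editor's sketch, not part of the original file): reserving a range
 * already in use by the hardware, e.g. a firmware framebuffer discovered at
 * boot. The base address and size are made-up values.
 *
 *	struct drm_mm_node *node;
 *	int ret;
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);	// must be zeroed
 *	if (!node)
 *		return -ENOMEM;
 *
 *	node->start = 0x100000;			// firmware scanout base
 *	node->size = 8 * 1024 * 1024;		// 8 MiB framebuffer
 *	ret = drm_mm_reserve_node(mm, node);	// -ENOSPC if range not free
 */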

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
        return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
                                struct drm_mm_node * const node,
                                u64 size, u64 alignment,
                                unsigned long color,
                                u64 range_start, u64 range_end,
                                enum drm_mm_insert_mode mode)
{
        struct drm_mm_node *hole;
        u64 remainder_mask;
        bool once;

        DRM_MM_BUG_ON(range_start >= range_end);

        if (unlikely(size == 0 || range_end - range_start < size))
                return -ENOSPC;

        if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
                return -ENOSPC;

        if (alignment <= 1)
                alignment = 0;

        once = mode & DRM_MM_INSERT_ONCE;
        mode &= ~DRM_MM_INSERT_ONCE;

        remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        for (hole = first_hole(mm, range_start, range_end, size, mode);
             hole;
             hole = once ? NULL : next_hole(mm, hole, mode)) {
                u64 hole_start = __drm_mm_hole_node_start(hole);
                u64 hole_end = hole_start + hole->hole_size;
                u64 adj_start, adj_end;
                u64 col_start, col_end;

                if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
                        break;

                if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
                        break;

                col_start = hole_start;
                col_end = hole_end;
                if (mm->color_adjust)
                        mm->color_adjust(hole, color, &col_start, &col_end);

                adj_start = max(col_start, range_start);
                adj_end = min(col_end, range_end);

                if (adj_end <= adj_start || adj_end - adj_start < size)
                        continue;

                if (mode == DRM_MM_INSERT_HIGH)
                        adj_start = adj_end - size;

                if (alignment) {
                        u64 rem;

                        if (likely(remainder_mask))
                                rem = adj_start & remainder_mask;
                        else
                                div64_u64_rem(adj_start, alignment, &rem);
                        if (rem) {
                                adj_start -= rem;
                                if (mode != DRM_MM_INSERT_HIGH)
                                        adj_start += alignment;

                                if (adj_start < max(col_start, range_start) ||
                                    min(col_end, range_end) - adj_start < size)
                                        continue;

                                if (adj_end <= adj_start ||
                                    adj_end - adj_start < size)
                                        continue;
                        }
                }

                node->mm = mm;
                node->size = size;
                node->start = adj_start;
                node->color = color;
                node->hole_size = 0;

                list_add(&node->node_list, &hole->node_list);
                drm_mm_interval_tree_add_node(hole, node);
                node->allocated = true;

                rm_hole(hole);
                if (adj_start > hole_start)
                        add_hole(hole);
                if (adj_start + size < hole_end)
                        add_hole(node);

                save_stack(node);
                return 0;
        }

        return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
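
/*
 * Example (editor's sketch): allocating 4 KiB aligned to 64 KiB from the top
 * of the first 256 MiB of the managed range, with color 0:
 *
 *	int ret;
 *
 *	ret = drm_mm_insert_node_in_range(mm, node, SZ_4K, SZ_64K, 0,
 *					  0, SZ_256M, DRM_MM_INSERT_HIGH);
 *	if (ret)
 *		return ret;	// -ENOSPC: no suitable hole
 */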

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        DRM_MM_BUG_ON(!node->allocated);
        DRM_MM_BUG_ON(node->scanned_block);

        prev_node = list_prev_entry(node, node_list);

        if (drm_mm_hole_follows(node))
                rm_hole(node);

        drm_mm_interval_tree_remove(node, &mm->interval_tree);
        list_del(&node->node_list);
        node->allocated = false;

        if (drm_mm_hole_follows(prev_node))
                rm_hole(prev_node);
        add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        struct drm_mm *mm = old->mm;

        DRM_MM_BUG_ON(!old->allocated);

        *new = *old;

        list_replace(&old->node_list, &new->node_list);
        rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

        if (drm_mm_hole_follows(old)) {
                list_replace(&old->hole_stack, &new->hole_stack);
                rb_replace_node_cached(&old->rb_hole_size,
                                       &new->rb_hole_size,
                                       &mm->holes_size);
                rb_replace_node(&old->rb_hole_addr,
                                &new->rb_hole_addr,
                                &mm->holes_addr);
        }

        old->allocated = false;
        new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);
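
/*
 * Example (editor's sketch): handing an allocation over to a node embedded in
 * a different object; the object structs are hypothetical driver types:
 *
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *	// old_obj->node is now unallocated and old_obj may be freed
 */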

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */
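
/*
 * Example (editor's sketch, not part of the original file): a typical
 * eviction loop. The LRU list, the object struct with its embedded node and
 * list links, and my_obj_unbind() are assumptions standing in for driver
 * code.
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			goto found;
 *	}
 *
 *	// No hole found: restore all nodes. list_add() above means walking
 *	// evict_list forward visits the nodes in reverse insertion order.
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *		drm_mm_scan_remove_block(&scan, &obj->node);
 *	return -ENOSPC;
 *
 * found:
 *	// Every scanned block must still be removed; evict the selected ones.
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			my_obj_unbind(obj);	// frees obj->node
 */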

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
                                 struct drm_mm *mm,
                                 u64 size,
                                 u64 alignment,
                                 unsigned long color,
                                 u64 start,
                                 u64 end,
                                 enum drm_mm_insert_mode mode)
{
        DRM_MM_BUG_ON(start >= end);
        DRM_MM_BUG_ON(!size || size > end - start);
        DRM_MM_BUG_ON(mm->scan_active);

        scan->mm = mm;

        if (alignment <= 1)
                alignment = 0;

        scan->color = color;
        scan->alignment = alignment;
        scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
        scan->size = size;
        scan->mode = mode;

        scan->range_start = start;
        scan->range_end = end;

        scan->hit_start = U64_MAX;
        scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
                           struct drm_mm_node *node)
{
        struct drm_mm *mm = scan->mm;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;
        u64 col_start, col_end;
        u64 adj_start, adj_end;

        DRM_MM_BUG_ON(node->mm != mm);
        DRM_MM_BUG_ON(!node->allocated);
        DRM_MM_BUG_ON(node->scanned_block);
        node->scanned_block = true;
        mm->scan_active++;

        /* Remove this block from the node_list so that we enlarge the hole
         * (distance between the end of our previous node and the start of
         * our next), without poisoning the link so that we can restore it
         * later in drm_mm_scan_remove_block().
         */
        hole = list_prev_entry(node, node_list);
        DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
        __list_del_entry(&node->node_list);

        hole_start = __drm_mm_hole_node_start(hole);
        hole_end = __drm_mm_hole_node_end(hole);

        col_start = hole_start;
        col_end = hole_end;
        if (mm->color_adjust)
                mm->color_adjust(hole, scan->color, &col_start, &col_end);

        adj_start = max(col_start, scan->range_start);
        adj_end = min(col_end, scan->range_end);
        if (adj_end <= adj_start || adj_end - adj_start < scan->size)
                return false;

        if (scan->mode == DRM_MM_INSERT_HIGH)
                adj_start = adj_end - scan->size;

        if (scan->alignment) {
                u64 rem;

                if (likely(scan->remainder_mask))
                        rem = adj_start & scan->remainder_mask;
                else
                        div64_u64_rem(adj_start, scan->alignment, &rem);
                if (rem) {
                        adj_start -= rem;
                        if (scan->mode != DRM_MM_INSERT_HIGH)
                                adj_start += scan->alignment;
                        if (adj_start < max(col_start, scan->range_start) ||
                            min(col_end, scan->range_end) - adj_start < scan->size)
                                return false;

                        if (adj_end <= adj_start ||
                            adj_end - adj_start < scan->size)
                                return false;
                }
        }

        scan->hit_start = adj_start;
        scan->hit_end = adj_start + scan->size;

        DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
        DRM_MM_BUG_ON(scan->hit_start < hole_start);
        DRM_MM_BUG_ON(scan->hit_end > hole_end);

        return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
                              struct drm_mm_node *node)
{
        struct drm_mm_node *prev_node;

        DRM_MM_BUG_ON(node->mm != scan->mm);
        DRM_MM_BUG_ON(!node->scanned_block);
        node->scanned_block = false;

        DRM_MM_BUG_ON(!node->mm->scan_active);
        node->mm->scan_active--;

        /* During drm_mm_scan_add_block() we decoupled this node leaving
         * its pointers intact. Now that the caller is walking back along
         * the eviction list we can restore this block into its rightful
         * place on the full node_list. To confirm that the caller is walking
         * backwards correctly we check that prev_node->next == node->next,
         * i.e. both believe the same node should be on the other side of the
         * hole.
         */
        prev_node = list_prev_entry(node, node_list);
        DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
                      list_next_entry(node, node_list));
        list_add(&node->node_list, &prev_node->node_list);

        return (node->start + node->size > scan->hit_start &&
                node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
        struct drm_mm *mm = scan->mm;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;

        DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

        if (!mm->color_adjust)
                return NULL;

        /*
         * The hole found during scanning should ideally be the first element
         * in the hole_stack list, but due to side-effects in the driver it
         * may not be.
         */
        list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
                hole_start = __drm_mm_hole_node_start(hole);
                hole_end = hole_start + hole->hole_size;

                if (hole_start <= scan->hit_start &&
                    hole_end >= scan->hit_end)
                        break;
        }

        /* We should only be called after we found the hole previously */
        DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
        if (unlikely(&hole->hole_stack == &mm->hole_stack))
                return NULL;

        DRM_MM_BUG_ON(hole_start > scan->hit_start);
        DRM_MM_BUG_ON(hole_end < scan->hit_end);

        mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
        if (hole_start > scan->hit_start)
                return hole;
        if (hole_end < scan->hit_end)
                return list_next_entry(hole, node_list);

        return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
        DRM_MM_BUG_ON(start + size <= start);

        mm->color_adjust = NULL;

        INIT_LIST_HEAD(&mm->hole_stack);
        mm->interval_tree = RB_ROOT_CACHED;
        mm->holes_size = RB_ROOT_CACHED;
        mm->holes_addr = RB_ROOT;

        /* Clever trick to avoid a special case in the free hole tracking. */
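        /* The head node gets size -size, so its hole starts at
         * start + size + (-size) == start (u64 wraparound) and, via the
         * circular node_list, ends at head_node.start == start + size,
         * i.e. it spans the entire managed range.
         */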
        INIT_LIST_HEAD(&mm->head_node.node_list);
        mm->head_node.allocated = false;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = -size;
        add_hole(&mm->head_node);

        mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
        if (WARN(!drm_mm_clean(mm),
                 "Memory manager not clean during takedown.\n"))
                show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
        u64 start, size;

        size = entry->hole_size;
        if (size) {
                start = drm_mm_hole_node_start(entry);
                drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
                           start, start + size, size);
        }

        return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
        const struct drm_mm_node *entry;
        u64 total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(p, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
                           entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(p, entry);
        }
        total = total_free + total_used;

        drm_printf(p, "total: %llu, used %llu free %llu\n", total,
                   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);

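/*
 * Example (editor's sketch): dumping the allocator state to the kernel log,
 * assuming a struct device *dev is at hand:
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	drm_mm_print(mm, &p);
 */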